From 7e3dfc0931946b15ef930cc66b4947e1bd3add72 Mon Sep 17 00:00:00 2001 From: ierandra Date: Thu, 12 Sep 2024 18:23:42 +0300 Subject: [PATCH 01/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store --- .../oak/commons/FixturesHelper.java | 2 +- .../jackrabbit/oak/NodeStoreFixtures.java | 6 + .../oak/spi/state/NodeStoreTest.java | 2 +- oak-parent/pom.xml | 5 + .../oak/fixture/SegmentTarFixture.java | 6 +- .../run/cli/SegmentTarFixtureProvider.java | 8 +- oak-run-elastic/pom.xml | 2 +- .../AzureSegmentStoreExplorerBackend.java | 10 +- .../oak/run/FileStoreDiffCommand.java | 6 +- .../oak/run/DataStoreCommandTest.java | 12 +- oak-segment-azure/pom.xml | 40 +- .../segment/azure/AzureArchiveManager.java | 182 +++--- .../oak/segment/azure/AzureGCJournalFile.java | 29 +- .../azure/AzureHttpRequestLoggingPolicy.java | 52 ++ .../oak/segment/azure/AzureJournalFile.java | 137 ++-- .../oak/segment/azure/AzureManifestFile.java | 23 +- .../oak/segment/azure/AzurePersistence.java | 116 ++-- .../azure/AzurePersistenceManager.java | 197 ++++++ .../segment/azure/AzureRepositoryLock.java | 59 +- .../azure/AzureSegmentArchiveReader.java | 60 +- .../azure/AzureSegmentArchiveWriter.java | 75 +-- .../azure/AzureSegmentStoreService.java | 144 +---- .../oak/segment/azure/AzureUtilities.java | 131 +--- .../oak/segment/azure/Configuration.java | 2 +- .../oak/segment/azure/ReverseFileReader.java | 36 +- .../oak/segment/azure/tool/AzureCheck.java | 14 +- .../oak/segment/azure/tool/AzureCompact.java | 20 +- .../oak/segment/azure/tool/SegmentCopy.java | 16 +- .../azure/tool/SegmentStoreMigrator.java | 6 +- .../oak/segment/azure/tool/ToolUtils.java | 29 +- .../azure/util/AzureRequestOptions.java | 85 ++- .../azure/util/AzureRequestOptionsV8.java | 96 +++ .../azure/v8/AzureArchiveManagerV8.java | 330 ++++++++++ .../azure/v8/AzureGCJournalFileV8.java | 75 +++ .../segment/azure/v8/AzureJournalFileV8.java | 328 ++++++++++ .../segment/azure/v8/AzureManifestFileV8.java | 78 +++ .../segment/azure/v8/AzurePersistenceV8.java | 158 +++++ .../azure/v8/AzureRepositoryLockV8.java | 197 ++++++ .../azure/v8/AzureSegmentArchiveReaderV8.java | 112 ++++ .../azure/v8/AzureSegmentArchiveWriterV8.java | 138 ++++ .../azure/v8/AzureSegmentStoreServiceV8.java | 155 +++++ .../AzureStorageCredentialManagerV8.java} | 14 +- .../segment/azure/v8/AzureUtilitiesV8.java | 196 ++++++ .../segment/azure/v8/ReverseFileReaderV8.java | 119 ++++ ...entCopyAzureServicePrincipalToTarTest.java | 18 +- .../azure/tool/SegmentCopyTestBase.java | 4 +- .../azure/AzureArchiveManagerTest.java | 209 ++++--- .../oak/segment/azure/AzureGCJournalTest.java | 18 +- .../AzureHttpRequestLoggingTestingPolicy.java | 31 + .../segment/azure/AzureJournalFileTest.java | 38 +- .../segment/azure/AzureManifestFileTest.java | 18 +- .../segment/azure/AzureReadSegmentTest.java | 37 +- .../azure/AzureRepositoryLockTest.java | 83 ++- .../azure/AzureSegmentArchiveWriterTest.java | 37 +- .../azure/AzureSegmentStoreServiceTest.java | 131 ++-- .../oak/segment/azure/AzureTarFileTest.java | 21 +- .../oak/segment/azure/AzureTarFilesTest.java | 20 +- .../oak/segment/azure/AzureTarWriterTest.java | 18 +- .../oak/segment/azure/AzuriteDockerRule.java | 135 ++++ .../segment/azure/MockAzureHttpResponse.java | 62 ++ .../azure/fixture/SegmentAzureFixture.java | 61 +- .../azure/fixture/SegmentAzureFixtureV8.java | 105 ++++ .../azure/journal/AzureJournalReaderTest.java | 34 +- .../azure/journal/AzureTarRevisionsTest.java | 21 +- .../azure/journal/ReverseFileReaderTest.java | 51 +- 
.../journal/v8/AzureJournalReaderV8Test.java | 57 ++ .../journal/v8/AzureTarRevisionsV8Test.java | 52 ++ .../journal/v8/ReverseFileReaderV8Test.java | 115 ++++ .../oak/segment/azure/tool/ToolUtilsTest.java | 34 +- ...st.java => AzureRequestOptionsV8Test.java} | 34 +- .../azure/v8/AzureArchiveManagerV8Test.java | 587 ++++++++++++++++++ .../azure/v8/AzureGCJournalV8Test.java | 65 ++ .../AzureJournalFileV8ConcurrencyIT.java} | 10 +- .../azure/v8/AzureJournalFileV8Test.java | 215 +++++++ .../azure/v8/AzureManifestFileV8Test.java | 64 ++ .../azure/v8/AzureReadSegmentV8Test.java | 123 ++++ .../azure/v8/AzureRepositoryLockV8Test.java | 171 +++++ .../v8/AzureSegmentArchiveWriterV8Test.java | 223 +++++++ .../v8/AzureSegmentStoreServiceV8Test.java | 286 +++++++++ .../segment/azure/v8/AzureTarFileV8Test.java | 70 +++ .../segment/azure/v8/AzureTarFilesV8Test.java | 58 ++ .../azure/v8/AzureTarWriterV8Test.java | 71 +++ .../split/SplitPersistenceBlobTest.java | 36 +- .../split/SplitPersistenceTest.java | 18 +- .../split/v8/SplitPersistenceBlobV8Test.java | 164 +++++ .../split/v8/SplitPersistenceV8Test.java | 144 +++++ .../upgrade/cli/node/SegmentAzureFactory.java | 26 +- ...TarToSegmentAzureServicePrincipalTest.java | 6 +- .../SegmentAzureNodeStoreContainer.java | 10 +- ...ureServicePrincipalNodeStoreContainer.java | 38 +- .../cli/node/SegmentAzureFactoryTest.java | 28 +- 91 files changed, 6130 insertions(+), 1235 deletions(-) create mode 100644 oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicy.java create mode 100644 oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java create mode 100644 oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8.java create mode 100644 oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java create mode 100644 oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalFileV8.java create mode 100644 oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8.java create mode 100644 oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8.java create mode 100644 oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzurePersistenceV8.java create mode 100644 oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8.java create mode 100644 oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveReaderV8.java create mode 100644 oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8.java create mode 100644 oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreServiceV8.java rename oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/{AzureStorageCredentialManager.java => v8/AzureStorageCredentialManagerV8.java} (93%) create mode 100644 oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureUtilitiesV8.java create mode 100644 oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/ReverseFileReaderV8.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingTestingPolicy.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java 
create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/MockAzureHttpResponse.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixtureV8.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureJournalReaderV8Test.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureTarRevisionsV8Test.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/ReverseFileReaderV8Test.java rename oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/util/{AzureRequestOptionsTest.java => AzureRequestOptionsV8Test.java} (63%) create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalV8Test.java rename oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/{AzureJournalFileConcurrencyIT.java => v8/AzureJournalFileV8ConcurrencyIT.java} (94%) create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8Test.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8Test.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureReadSegmentV8Test.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8Test.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreServiceV8Test.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFileV8Test.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFilesV8Test.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarWriterV8Test.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceBlobV8Test.java create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceV8Test.java diff --git a/oak-commons/src/test/java/org/apache/jackrabbit/oak/commons/FixturesHelper.java b/oak-commons/src/test/java/org/apache/jackrabbit/oak/commons/FixturesHelper.java index b2f2f075213..b1e36a8fd9e 100644 --- a/oak-commons/src/test/java/org/apache/jackrabbit/oak/commons/FixturesHelper.java +++ b/oak-commons/src/test/java/org/apache/jackrabbit/oak/commons/FixturesHelper.java @@ -53,7 +53,7 @@ private FixturesHelper() { } * default fixtures when no {@code nsfixtures} system property is provided */ public enum Fixture { - DOCUMENT_NS, @Deprecated SEGMENT_MK, DOCUMENT_RDB, MEMORY_NS, DOCUMENT_MEM, SEGMENT_TAR, SEGMENT_AWS, SEGMENT_AZURE, COMPOSITE_SEGMENT, COMPOSITE_MEM, COW_DOCUMENT + DOCUMENT_NS, @Deprecated SEGMENT_MK, DOCUMENT_RDB, MEMORY_NS, DOCUMENT_MEM, SEGMENT_TAR, SEGMENT_AWS, SEGMENT_AZURE_V8, SEGMENT_AZURE, COMPOSITE_SEGMENT, COMPOSITE_MEM, COW_DOCUMENT } private static final Set FIXTURES; diff --git 
a/oak-it/src/test/java/org/apache/jackrabbit/oak/NodeStoreFixtures.java b/oak-it/src/test/java/org/apache/jackrabbit/oak/NodeStoreFixtures.java index 6afe976f6d8..b482254c7d1 100644 --- a/oak-it/src/test/java/org/apache/jackrabbit/oak/NodeStoreFixtures.java +++ b/oak-it/src/test/java/org/apache/jackrabbit/oak/NodeStoreFixtures.java @@ -33,6 +33,7 @@ import org.apache.jackrabbit.oak.composite.CompositeSegmentStoreFixture; import org.apache.jackrabbit.oak.segment.aws.fixture.SegmentAwsFixture; import org.apache.jackrabbit.oak.segment.azure.fixture.SegmentAzureFixture; +import org.apache.jackrabbit.oak.segment.azure.fixture.SegmentAzureFixtureV8; import org.apache.jackrabbit.oak.segment.fixture.SegmentTarFixture; public class NodeStoreFixtures { @@ -43,6 +44,8 @@ public class NodeStoreFixtures { public static final NodeStoreFixture SEGMENT_AWS = new SegmentAwsFixture(); + public static final NodeStoreFixture SEGMENT_AZURE_V8 = new SegmentAzureFixtureV8(); + public static final NodeStoreFixture SEGMENT_AZURE = new SegmentAzureFixture(); public static final NodeStoreFixture DOCUMENT_NS = new DocumentMongoFixture(); @@ -80,6 +83,9 @@ public static Collection asJunitParameters(Set if (fixtures.contains(FixturesHelper.Fixture.SEGMENT_AZURE)) { configuredFixtures.add(SEGMENT_AZURE); } + if (fixtures.contains(FixturesHelper.Fixture.SEGMENT_AZURE_V8)) { + configuredFixtures.add(SEGMENT_AZURE_V8); + } if (fixtures.contains(FixturesHelper.Fixture.COMPOSITE_SEGMENT)) { configuredFixtures.add(COMPOSITE_SEGMENT); } diff --git a/oak-it/src/test/java/org/apache/jackrabbit/oak/spi/state/NodeStoreTest.java b/oak-it/src/test/java/org/apache/jackrabbit/oak/spi/state/NodeStoreTest.java index adb6bd9ca5b..ae5e1ae4319 100644 --- a/oak-it/src/test/java/org/apache/jackrabbit/oak/spi/state/NodeStoreTest.java +++ b/oak-it/src/test/java/org/apache/jackrabbit/oak/spi/state/NodeStoreTest.java @@ -459,7 +459,7 @@ public void moveToDescendant() { if (fixture == NodeStoreFixtures.SEGMENT_TAR || fixture == NodeStoreFixtures.MEMORY_NS || fixture == NodeStoreFixtures.COMPOSITE_MEM || fixture == NodeStoreFixtures.COMPOSITE_SEGMENT || fixture == NodeStoreFixtures.COW_DOCUMENT || fixture == NodeStoreFixtures.SEGMENT_AWS - || fixture == NodeStoreFixtures.SEGMENT_AZURE) { + || fixture == NodeStoreFixtures.SEGMENT_AZURE_V8 || fixture == NodeStoreFixtures.SEGMENT_AZURE) { assertTrue(x.moveTo(x, "xx")); assertFalse(x.exists()); assertFalse(test.hasChildNode("x")); diff --git a/oak-parent/pom.xml b/oak-parent/pom.xml index d3fae4031f8..d2e9ae75acd 100644 --- a/oak-parent/pom.xml +++ b/oak-parent/pom.xml @@ -693,6 +693,11 @@ jackson-dataformat-smile ${jackson.version} + + com.fasterxml.jackson.dataformat + jackson-dataformat-xml + ${jackson.version} + org.apache.httpcomponents httpclient diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/SegmentTarFixture.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/SegmentTarFixture.java index 062c1d8666a..9a539cd3cc0 100644 --- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/SegmentTarFixture.java +++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/SegmentTarFixture.java @@ -44,7 +44,7 @@ import org.apache.jackrabbit.oak.segment.aws.AwsContext; import org.apache.jackrabbit.oak.segment.aws.AwsPersistence; import org.apache.jackrabbit.oak.segment.aws.Configuration; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; import 
org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions; import org.apache.jackrabbit.oak.segment.file.FileStore; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; @@ -289,7 +289,7 @@ public Oak getOak(int clusterId) throws Exception { CloudBlobContainer container = cloud.createCloudBlobClient().getContainerReference(azureContainerName); container.createIfNotExists(); CloudBlobDirectory directory = container.getDirectoryReference(azureRootPath); - fileStoreBuilder.withCustomPersistence(new AzurePersistence(directory)); + fileStoreBuilder.withCustomPersistence(new AzurePersistenceV8(directory)); } BlobStore blobStore = null; @@ -341,7 +341,7 @@ public Oak[] setUpCluster(int n, StatisticsProvider statsProvider) throws Except container.createIfNotExists(); containers[i] = container; CloudBlobDirectory directory = container.getDirectoryReference(azureRootPath + "/primary-" + i); - builder.withCustomPersistence(new AzurePersistence(directory)); + builder.withCustomPersistence(new AzurePersistenceV8(directory)); } if (blobStore != null) { diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/SegmentTarFixtureProvider.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/SegmentTarFixtureProvider.java index d3720c0fc1b..0528230d01d 100644 --- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/SegmentTarFixtureProvider.java +++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/SegmentTarFixtureProvider.java @@ -30,7 +30,7 @@ import org.apache.jackrabbit.guava.common.io.Closer; import org.apache.commons.io.FileUtils; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils; import org.apache.jackrabbit.oak.segment.file.FileStore; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; @@ -53,12 +53,12 @@ static NodeStore configureSegment(Options options, BlobStore blobStore, Whiteboa FileStoreBuilder builder; if (segmentStoreType == ToolUtils.SegmentStoreType.AZURE) { - final AzureStorageCredentialManager azureStorageCredentialManager = new AzureStorageCredentialManager(); + final AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); SegmentNodeStorePersistence segmentNodeStorePersistence = - ToolUtils.newSegmentNodeStorePersistence(segmentStoreType, pathOrUri, azureStorageCredentialManager); + ToolUtils.newSegmentNodeStorePersistence(segmentStoreType, pathOrUri, azureStorageCredentialManagerV8); File tempDir = Files.createTempDirectory("azure-segment-store").toFile(); closer.register(() -> FileUtils.deleteQuietly(tempDir)); - closer.register(azureStorageCredentialManager); + closer.register(azureStorageCredentialManagerV8); builder = fileStoreBuilder(tempDir).withCustomPersistence(segmentNodeStorePersistence); } else { builder = fileStoreBuilder(new File(pathOrUri)).withMaxFileSize(256); diff --git a/oak-run-elastic/pom.xml b/oak-run-elastic/pom.xml index 640d7c62988..a1d31fc942d 100644 --- a/oak-run-elastic/pom.xml +++ b/oak-run-elastic/pom.xml @@ -42,7 +42,7 @@ 105 MB: Azure updates 107 MB: RDB/Tomcat (OAK-10752) --> - 112197632 + 113039632 diff --git a/oak-run/src/main/java/org/apache/jackrabbit/oak/explorer/AzureSegmentStoreExplorerBackend.java 
b/oak-run/src/main/java/org/apache/jackrabbit/oak/explorer/AzureSegmentStoreExplorerBackend.java index 5f5b1580a00..c81082d24c2 100644 --- a/oak-run/src/main/java/org/apache/jackrabbit/oak/explorer/AzureSegmentStoreExplorerBackend.java +++ b/oak-run/src/main/java/org/apache/jackrabbit/oak/explorer/AzureSegmentStoreExplorerBackend.java @@ -19,7 +19,7 @@ package org.apache.jackrabbit.oak.explorer; import org.apache.jackrabbit.guava.common.io.Files; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils; import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFile; @@ -39,16 +39,16 @@ public class AzureSegmentStoreExplorerBackend extends AbstractSegmentTarExplorerBackend { private final String path; private SegmentNodeStorePersistence persistence; - private final AzureStorageCredentialManager azureStorageCredentialManager; + private final AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8; public AzureSegmentStoreExplorerBackend(String path) { this.path = path; - this.azureStorageCredentialManager = new AzureStorageCredentialManager(); + this.azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); } @Override public void open() throws IOException { - this.persistence = newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE, path, azureStorageCredentialManager); + this.persistence = newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE, path, azureStorageCredentialManagerV8); try { this.store = fileStoreBuilder(Files.createTempDir()) @@ -63,7 +63,7 @@ public void open() throws IOException { @Override public void close() { super.close(); - azureStorageCredentialManager.close(); + azureStorageCredentialManagerV8.close(); } @Override diff --git a/oak-run/src/main/java/org/apache/jackrabbit/oak/run/FileStoreDiffCommand.java b/oak-run/src/main/java/org/apache/jackrabbit/oak/run/FileStoreDiffCommand.java index 0d2ee2c6a5f..e3c94654d75 100644 --- a/oak-run/src/main/java/org/apache/jackrabbit/oak/run/FileStoreDiffCommand.java +++ b/oak-run/src/main/java/org/apache/jackrabbit/oak/run/FileStoreDiffCommand.java @@ -28,7 +28,7 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; import org.apache.jackrabbit.oak.run.commons.Command; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils; import org.apache.jackrabbit.oak.segment.file.ReadOnlyFileStore; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; @@ -87,8 +87,8 @@ public void execute(String... 
args) throws Exception { } } else { if (pathOrURI.startsWith("az:")) { - try (AzureStorageCredentialManager azureStorageCredentialManager = new AzureStorageCredentialManager()) { - SegmentNodeStorePersistence azurePersistence = ToolUtils.newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE, pathOrURI, azureStorageCredentialManager); + try (AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8()) { + SegmentNodeStorePersistence azurePersistence = ToolUtils.newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE, pathOrURI, azureStorageCredentialManagerV8); ReadOnlyFileStore store = fileStoreBuilder(Files.createTempDir()).withCustomPersistence(azurePersistence).withBlobStore(newBasicReadOnlyBlobStore()).buildReadOnly(); statusCode = Diff.builder() .withPath(pathOrURI) diff --git a/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java b/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java index 7d53878a923..17027b9df26 100644 --- a/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java +++ b/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java @@ -75,8 +75,8 @@ import org.apache.jackrabbit.oak.run.cli.BlobStoreOptions.Type; import org.apache.jackrabbit.oak.segment.SegmentNodeStore; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; -import org.apache.jackrabbit.oak.segment.azure.AzureUtilities; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils; import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions; import org.apache.jackrabbit.oak.segment.file.FileStore; @@ -1141,7 +1141,7 @@ class SegmentStoreFixture implements StoreFixture { class AzureSegmentStoreFixture extends SegmentStoreFixture { private static final String AZURE_DIR = "repository"; private String container; - private final AzureStorageCredentialManager azureStorageCredentialManager = new AzureStorageCredentialManager(); + private final AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); @Override public NodeStore init(DataStoreBlobStore blobStore, File storeFile) throws Exception { Properties props = AzureDataStoreUtils.getAzureConfig(); @@ -1151,14 +1151,14 @@ class AzureSegmentStoreFixture extends SegmentStoreFixture { container = container + System.currentTimeMillis(); // Create the azure segment container String connectionString = getAzureConnectionString(accessKey, secretKey, container, AZURE_DIR); - AzureUtilities.cloudBlobDirectoryFrom(connectionString, container, AZURE_DIR); + AzureUtilitiesV8.cloudBlobDirectoryFrom(connectionString, container, AZURE_DIR); // get the azure uri expected by the command storePath = getAzureUri(accessKey, container, AZURE_DIR); // initialize azure segment for test setup SegmentNodeStorePersistence segmentNodeStorePersistence = - ToolUtils.newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE, storePath, azureStorageCredentialManager); + ToolUtils.newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE, storePath, azureStorageCredentialManagerV8); fileStore = fileStoreBuilder(storeFile).withBlobStore(blobStore) .withCustomPersistence(segmentNodeStorePersistence).build(); @@ -1190,7 +1190,7 @@ 
protected String getAzureConnectionString(String accountName, String secret, Str public void after() { try { AzureDataStoreUtils.deleteContainer(container); - azureStorageCredentialManager.close(); + azureStorageCredentialManagerV8.close(); } catch(Exception e) { log.error("Error in cleaning the container {}", container, e); } diff --git a/oak-segment-azure/pom.xml b/oak-segment-azure/pom.xml index db088ad9d04..4998550ee52 100644 --- a/oak-segment-azure/pom.xml +++ b/oak-segment-azure/pom.xml @@ -48,10 +48,12 @@ org.apache.jackrabbit.oak.segment.remote*, com.fasterxml.jackson.annotation;resolution:=optional, com.fasterxml.jackson.databind*;resolution:=optional, - com.fasterxml.jackson.dataformat.xml;resolution:=optional, + com.fasterxml.jackson.dataformat.*;resolution:=optional, com.fasterxml.jackson.datatype*;resolution:=optional, com.azure.identity.broker.implementation;resolution:=optional, - com.azure.xml;resolution:=optional, + com.azure.storage.blob*;resolution:=optional, + com.azure.storage.common*;resolution:=optional, + com.azure.storage.internal*;resolution:=optional, com.microsoft.aad.msal4jextensions*;resolution:=optional, com.nimbusds.langtag;resolution:=optional, com.sun.jna*;resolution:=optional, @@ -71,11 +73,16 @@ org.apache.jackrabbit.oak.segment.azure, org.apache.jackrabbit.oak.segment.azure.queue, org.apache.jackrabbit.oak.segment.azure.util, + com.fasterxml.jackson.dataformat.xml, + com.fasterxml.jackson.dataformat.xml.deser, com.microsoft.azure.storage, com.microsoft.azure.storage.core, com.microsoft.azure.storage.blob, com.azure.core.credential, - com.azure.identity + com.azure.identity, + com.azure.blob, + com.azure.storage.common, + com.azure.storage.internal.avro.implementation azure-storage, @@ -83,6 +90,11 @@ azure-core, azure-identity, azure-json, + azure-xml, + azure-storage-blob, + azure-storage-common, + azure-storage-internal-avro, + jackson-dataformat-xml, guava, jsr305, reactive-streams, @@ -188,6 +200,28 @@ azure-keyvault-core + + + com.azure + azure-storage-blob + 12.25.3 + + + com.azure + azure-storage-common + 12.24.3 + + + com.azure + azure-storage-internal-avro + 12.10.3 + + + com.azure + azure-xml + 1.0.0 + + com.azure diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java index 13eeab83d9a..298ba40d0ae 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java @@ -16,17 +16,19 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.BlobListingDetails; -import com.microsoft.azure.storage.blob.CloudBlob; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.CloudBlockBlob; -import com.microsoft.azure.storage.blob.CopyStatus; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; +import com.azure.core.util.polling.PollResponse; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobCopyInfo; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.ListBlobsOptions; +import 
com.azure.storage.blob.models.CopyStatusType; +import com.azure.storage.blob.specialized.BlockBlobClient; import org.apache.jackrabbit.oak.segment.remote.RemoteUtilities; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor; import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveReader; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; import org.jetbrains.annotations.NotNull; @@ -34,38 +36,43 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.net.URISyntaxException; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collections; -import java.util.EnumSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.UUID; import java.util.Set; +import java.time.Duration; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; -import java.util.stream.StreamSupport; +import static com.azure.storage.blob.models.BlobType.BLOCK_BLOB; import static org.apache.jackrabbit.guava.common.base.Preconditions.checkArgument; import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.getName; public class AzureArchiveManager implements SegmentArchiveManager { - private static final Logger log = LoggerFactory.getLogger(AzureSegmentArchiveReader.class); + private static final Logger log = LoggerFactory.getLogger(AzureArchiveManager.class); + + protected final BlobContainerClient readBlobContainerClient; - protected final CloudBlobDirectory cloudBlobDirectory; + protected final BlobContainerClient writeBlobContainerClient; + + protected final String rootPrefix; protected final IOMonitor ioMonitor; protected final FileStoreMonitor monitor; private WriteAccessController writeAccessController; - public AzureArchiveManager(CloudBlobDirectory segmentstoreDirectory, IOMonitor ioMonitor, FileStoreMonitor fileStoreMonitor, WriteAccessController writeAccessController) { - this.cloudBlobDirectory = segmentstoreDirectory; + public AzureArchiveManager(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, String rootPrefix, IOMonitor ioMonitor, FileStoreMonitor fileStoreMonitor, WriteAccessController writeAccessController) { + this.readBlobContainerClient = readBlobContainerClient; + this.writeBlobContainerClient = writeBlobContainerClient; + this.rootPrefix = rootPrefix; this.ioMonitor = ioMonitor; this.monitor = fileStoreMonitor; this.writeAccessController = writeAccessController; @@ -74,18 +81,16 @@ public AzureArchiveManager(CloudBlobDirectory segmentstoreDirectory, IOMonitor i @Override public List listArchives() throws IOException { try { - List archiveNames = StreamSupport.stream(cloudBlobDirectory - .listBlobs(null, false, EnumSet.noneOf(BlobListingDetails.class), null, null) - .spliterator(), false) - .filter(i -> i instanceof CloudBlobDirectory) - .map(i -> (CloudBlobDirectory) i) - .filter(i -> getName(i).endsWith(".tar")) - .map(CloudBlobDirectory::getPrefix) + List archiveNames = readBlobContainerClient.listBlobsByHierarchy(rootPrefix + "/").stream() + .filter(BlobItem::isPrefix) + .filter(blobItem -> blobItem.getName().endsWith(".tar") || blobItem.getName().endsWith(".tar/")) + .map(BlobItem::getName) .map(Paths::get) 
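+ // Each entry returned by listBlobsByHierarchy is a virtual-directory prefix such as "oak/data00000a.tar/" (name shown for illustration only); the Path conversion below strips the root prefix and the trailing separator, leaving just the archive name.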
.map(Path::getFileName) .map(Path::toString) .collect(Collectors.toList()); + Iterator it = archiveNames.iterator(); while (it.hasNext()) { String archiveName = it.next(); @@ -95,54 +100,57 @@ public List listArchives() throws IOException { } } return archiveNames; - } catch (URISyntaxException | StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } /** * Check if there's a valid 0000. segment in the archive + * * @param archiveName * @return true if the archive is empty (no 0000.* segment) */ - private boolean isArchiveEmpty(String archiveName) throws IOException, URISyntaxException, StorageException { - return !getDirectory(archiveName).listBlobs("0000.").iterator().hasNext(); + private boolean isArchiveEmpty(String archiveName) throws BlobStorageException { + String fullBlobPrefix = String.format("%s/%s", getDirectory(archiveName), "0000."); + ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); + listBlobsOptions.setPrefix(fullBlobPrefix); + return !readBlobContainerClient.listBlobs(listBlobsOptions, null).iterator().hasNext(); } @Override public SegmentArchiveReader open(String archiveName) throws IOException { try { - CloudBlobDirectory archiveDirectory = getDirectory(archiveName); - if (!archiveDirectory.getBlockBlobReference("closed").exists()) { + String closedBlob = String.format("%s/%s", getDirectory(archiveName), "closed"); + if (!readBlobContainerClient.getBlobClient(closedBlob).exists()) { return null; } - return new AzureSegmentArchiveReader(archiveDirectory, ioMonitor); - } catch (StorageException | URISyntaxException e) { + return new AzureSegmentArchiveReader(readBlobContainerClient, rootPrefix, archiveName, ioMonitor); + } catch (BlobStorageException e) { throw new IOException(e); } } @Override public SegmentArchiveReader forceOpen(String archiveName) throws IOException { - CloudBlobDirectory archiveDirectory = getDirectory(archiveName); - return new AzureSegmentArchiveReader(archiveDirectory, ioMonitor); + return new AzureSegmentArchiveReader(readBlobContainerClient, rootPrefix, archiveName, ioMonitor); } @Override public SegmentArchiveWriter create(String archiveName) throws IOException { - return new AzureSegmentArchiveWriter(getDirectory(archiveName), ioMonitor, monitor, writeAccessController); + return new AzureSegmentArchiveWriter(writeBlobContainerClient, rootPrefix, archiveName, ioMonitor, monitor, writeAccessController); } @Override public boolean delete(String archiveName) { try { getBlobs(archiveName) - .forEach(cloudBlob -> { + .forEach(blobItem -> { try { writeAccessController.checkWritingAllowed(); - cloudBlob.delete(); - } catch (StorageException e) { - log.error("Can't delete segment {}", cloudBlob.getUri().getPath(), e); + writeBlobContainerClient.getBlobClient(blobItem.getName()).delete(); + } catch (BlobStorageException e) { + log.error("Can't delete segment {}", blobItem.getName(), e); } }); return true; @@ -155,14 +163,14 @@ public boolean delete(String archiveName) { @Override public boolean renameTo(String from, String to) { try { - CloudBlobDirectory targetDirectory = getDirectory(to); + String targetDirectory = getDirectory(to); getBlobs(from) - .forEach(cloudBlob -> { + .forEach(blobItem -> { try { writeAccessController.checkWritingAllowed(); - renameBlob(cloudBlob, targetDirectory); + renameBlob(blobItem, targetDirectory); } catch (IOException e) { - log.error("Can't rename segment {}", cloudBlob.getUri().getPath(), e); + log.error("Can't rename segment {}", blobItem.getName(), e); } }); return true; @@ 
-174,13 +182,14 @@ public boolean renameTo(String from, String to) { @Override public void copyFile(String from, String to) throws IOException { - CloudBlobDirectory targetDirectory = getDirectory(to); + String targetDirectory = getDirectory(to); getBlobs(from) - .forEach(cloudBlob -> { + .forEach(blobItem -> { try { - copyBlob(cloudBlob, targetDirectory); + log.info("Backup segment: {}", blobItem.getName()); + copyBlob(blobItem, targetDirectory); } catch (IOException e) { - log.error("Can't copy segment {}", cloudBlob.getUri().getPath(), e); + log.error("Can't copy segment {}", blobItem.getName(), e); } }); } @@ -188,8 +197,10 @@ public void copyFile(String from, String to) throws IOException { @Override public boolean exists(String archiveName) { try { - return getDirectory(archiveName).listBlobsSegmented(null, false, null, 1, null, null, null).getLength() > 0; - } catch (IOException | StorageException | URISyntaxException e) { + ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); + listBlobsOptions.setPrefix(getDirectory(archiveName)); + return readBlobContainerClient.listBlobs(listBlobsOptions, null).iterator().hasNext(); + } catch (BlobStorageException e) { log.error("Can't check the existence of {}", archiveName, e); return false; } @@ -200,7 +211,7 @@ public void recoverEntries(String archiveName, LinkedHashMap entri Pattern pattern = Pattern.compile(RemoteUtilities.SEGMENT_FILE_NAME_PATTERN); List entryList = new ArrayList<>(); - for (CloudBlob b : getBlobs(archiveName)) { + for (BlobItem b : getBlobs(archiveName)) { String name = getName(b); Matcher m = pattern.matcher(name); if (!m.matches()) { @@ -208,12 +219,12 @@ public void recoverEntries(String archiveName, LinkedHashMap entri } int position = Integer.parseInt(m.group(1), 16); UUID uuid = UUID.fromString(m.group(2)); - long length = b.getProperties().getLength(); + long length = b.getProperties().getContentLength(); if (length > 0) { - byte[] data = new byte[(int) length]; + byte[] data; try { - b.downloadToByteArray(data, 0); - } catch (StorageException e) { + data = readBlobContainerClient.getBlobClient(b.getName()).downloadContent().toBytes(); + } catch (BlobStorageException e) { throw new IOException(e); } entryList.add(new RecoveredEntry(position, uuid, data, name)); @@ -234,13 +245,13 @@ public void recoverEntries(String archiveName, LinkedHashMap entri } private void delete(String archiveName, Set recoveredEntries) throws IOException { - getBlobs(archiveName) - .forEach(cloudBlob -> { - if (!recoveredEntries.contains(RemoteUtilities.getSegmentUUID(getName(cloudBlob)))) { + getBlobs(archiveName + "/") + .forEach(blobItem -> { + if (!recoveredEntries.contains(RemoteUtilities.getSegmentUUID(getName(blobItem)))) { try { - cloudBlob.delete(); - } catch (StorageException e) { - log.error("Can't delete segment {}", cloudBlob.getUri().getPath(), e); + writeBlobContainerClient.getBlobClient(blobItem.getName()).delete(); + } catch (BlobStorageException e) { + log.error("Can't delete segment {}", blobItem.getName(), e); } } }); @@ -257,51 +268,42 @@ public void backup(@NotNull String archiveName, @NotNull String backupArchiveNam delete(archiveName, recoveredEntries); } - protected CloudBlobDirectory getDirectory(String archiveName) throws IOException { - try { - return cloudBlobDirectory.getDirectoryReference(archiveName); - } catch (URISyntaxException e) { - throw new IOException(e); - } + protected String getDirectory(String archiveName) { + return String.format("%s/%s", rootPrefix, archiveName); } - private List 
getBlobs(String archiveName) throws IOException { - return AzureUtilities.getBlobs(getDirectory(archiveName)); + private List getBlobs(String archiveName) throws IOException { + String archivePath = getDirectory(archiveName); + ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); + listBlobsOptions.setPrefix(archivePath); + + return AzureUtilities.getBlobs(readBlobContainerClient, listBlobsOptions); } - private void renameBlob(CloudBlob blob, CloudBlobDirectory newParent) throws IOException { + private void renameBlob(BlobItem blob, String newParent) throws IOException { copyBlob(blob, newParent); try { - blob.delete(); - } catch (StorageException e) { + writeBlobContainerClient.getBlobClient(blob.getName()).delete(); + } catch (BlobStorageException e) { throw new IOException(e); } } - private void copyBlob(CloudBlob blob, CloudBlobDirectory newParent) throws IOException { - checkArgument(blob instanceof CloudBlockBlob, "Only page blobs are supported for the rename"); - try { - String blobName = getName(blob); - CloudBlockBlob newBlob = newParent.getBlockBlobReference(blobName); - newBlob.startCopy(blob.getUri()); - - boolean isStatusPending = true; - while (isStatusPending) { - newBlob.downloadAttributes(); - if (newBlob.getCopyState().getStatus() == CopyStatus.PENDING) { - Thread.sleep(100); - } else { - isStatusPending = false; - } - } + private void copyBlob(BlobItem blob, String newParent) throws IOException { + checkArgument(blob.getProperties().getBlobType() == BLOCK_BLOB, "Only page blobs are supported for the rename"); - CopyStatus finalStatus = newBlob.getCopyState().getStatus(); - if (newBlob.getCopyState().getStatus() != CopyStatus.SUCCESS) { - throw new IOException("Invalid copy status for " + blob.getUri().getPath() + ": " + finalStatus); - } - } catch (StorageException | InterruptedException | URISyntaxException e) { - throw new IOException(e); + BlockBlobClient sourceBlobClient = readBlobContainerClient.getBlobClient(blob.getName()).getBlockBlobClient(); + + String destinationBlob = String.format("%s/%s", newParent, AzureUtilities.getName(blob)); + BlockBlobClient destinationBlobClient = writeBlobContainerClient.getBlobClient(destinationBlob).getBlockBlobClient(); + + PollResponse response = destinationBlobClient.beginCopy(sourceBlobClient.getBlobUrl(), Duration.ofMillis(100)).waitForCompletion(); + + String finalStatus = response.getValue().getCopyStatus().toString(); + if (response.getValue().getCopyStatus() != CopyStatusType.SUCCESS) { + throw new IOException("Invalid copy status for " + blob.getName() + ": " + finalStatus); } + } private static class RecoveredEntry implements Comparable { diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalFile.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalFile.java index c33094750b7..6f0094b59d1 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalFile.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalFile.java @@ -16,34 +16,32 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudAppendBlob; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.specialized.AppendBlobClient; import org.apache.commons.io.IOUtils; import org.apache.jackrabbit.oak.segment.spi.persistence.GCJournalFile; import 
java.io.ByteArrayInputStream; import java.io.IOException; import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.List; public class AzureGCJournalFile implements GCJournalFile { - private final CloudAppendBlob gcJournal; + private final AppendBlobClient gcJournal; - public AzureGCJournalFile(CloudAppendBlob gcJournal) { + public AzureGCJournalFile(AppendBlobClient gcJournal) { this.gcJournal = gcJournal; } @Override public void writeLine(String line) throws IOException { try { - if (!gcJournal.exists()) { - gcJournal.createOrReplace(); - } - gcJournal.appendText(line + "\n", StandardCharsets.UTF_8.name(), null, null, null); - } catch (StorageException e) { + String appendLine = line + "\n"; + gcJournal.createIfNotExists(); + gcJournal.appendBlock(new ByteArrayInputStream((appendLine).getBytes()), appendLine.length()); + } catch (BlobStorageException e) { throw new IOException(e); } } @@ -54,10 +52,9 @@ public List<String> readLines() throws IOException { if (!gcJournal.exists()) { return Collections.emptyList(); } - byte[] data = new byte[(int) gcJournal.getProperties().getLength()]; - gcJournal.downloadToByteArray(data, 0); + byte[] data = gcJournal.downloadContent().toBytes(); return IOUtils.readLines(new ByteArrayInputStream(data), Charset.defaultCharset()); - } catch (StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } @@ -65,10 +62,8 @@ public List<String> readLines() throws IOException { @Override public void truncate() throws IOException { try { - if (gcJournal.exists()) { - gcJournal.delete(); - } - } catch (StorageException e) { + gcJournal.deleteIfExists(); + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicy.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicy.java new file mode 100644 index 00000000000..cf029f64ba7 --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicy.java @@ -0,0 +1,52 @@ +package org.apache.jackrabbit.oak.segment.azure; + +import com.azure.core.http.HttpPipelineCallContext; +import com.azure.core.http.HttpPipelineNextPolicy; +import com.azure.core.http.HttpResponse; +import com.azure.core.http.policy.HttpPipelinePolicy; +import org.apache.jackrabbit.guava.common.base.Stopwatch; +import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.util.concurrent.TimeUnit; + +public class AzureHttpRequestLoggingPolicy implements HttpPipelinePolicy { + + private static final Logger log = LoggerFactory.getLogger(AzureHttpRequestLoggingPolicy.class); + + private final boolean verboseEnabled = Boolean.getBoolean("segment.azure.v12.http.verbose.enabled"); + + private RemoteStoreMonitor remoteStoreMonitor; + + public void setRemoteStoreMonitor(RemoteStoreMonitor remoteStoreMonitor) { + log.info("Enable Azure Remote store Monitor"); + this.remoteStoreMonitor = remoteStoreMonitor; + } + + @Override + public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { + Stopwatch stopwatch = Stopwatch.createStarted(); + + return next.process().flatMap(httpResponse -> { + if (remoteStoreMonitor != null) { + remoteStoreMonitor.requestDuration(stopwatch.elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
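+ // Status codes above 201 are reported to the monitor as request errors; anything up to and including 201 (in practice 200 OK or 201 Created) counts as a successful request, so 3xx redirects also land in the error bucket.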
if (httpResponse.getStatusCode() > 201) { + remoteStoreMonitor.requestError(); + } else { + remoteStoreMonitor.requestCount(); + } + } + + if (verboseEnabled) { + log.info("HTTP Request: {} {}", context.getHttpRequest().getHttpMethod(), context.getHttpRequest().getUrl()); + log.info("Status code is: {}", httpResponse.getStatusCode()); + log.info("Response time: {}ms", (stopwatch.elapsed(TimeUnit.NANOSECONDS))/1_000_000); + } + + return Mono.just(httpResponse); + }); + } + +} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFile.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFile.java index 76b52de1136..b4da690b625 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFile.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFile.java @@ -16,15 +16,13 @@ */ package org.apache.jackrabbit.oak.segment.azure; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.BlobType; +import com.azure.storage.blob.models.ListBlobsOptions; +import com.azure.storage.blob.specialized.AppendBlobClient; import org.apache.jackrabbit.guava.common.collect.ImmutableList; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudAppendBlob; -import com.microsoft.azure.storage.blob.CloudBlob; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.ListBlobItem; -import com.microsoft.azure.storage.blob.DeleteSnapshotsOption; -import com.microsoft.azure.storage.blob.BlobRequestOptions; -import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptions; import org.apache.jackrabbit.oak.segment.azure.util.CaseInsensitiveKeysMapAccess; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFile; @@ -33,15 +31,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.ByteArrayInputStream; import java.io.IOException; -import java.net.URISyntaxException; -import java.util.ArrayList; -import java.util.Comparator; -import java.util.Iterator; -import java.util.List; -import java.util.Map; +import java.util.*; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; import static org.apache.jackrabbit.guava.common.collect.Lists.partition; @@ -51,7 +46,9 @@ public class AzureJournalFile implements JournalFile { private static final int JOURNAL_LINE_LIMIT = Integer.getInteger("org.apache.jackrabbit.oak.segment.azure.journal.lines", 40_000); - private final CloudBlobDirectory directory; + private final BlobContainerClient readBlobContainerClient; + + private final BlobContainerClient writeBlobContainerClient; private final String journalNamePrefix; @@ -59,20 +56,21 @@ public class AzureJournalFile implements JournalFile { private final WriteAccessController writeAccessController; - AzureJournalFile(CloudBlobDirectory directory, String journalNamePrefix, WriteAccessController writeAccessController, int lineLimit) { - this.directory = directory; + AzureJournalFile(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, String journalNamePrefix, WriteAccessController writeAccessController, int lineLimit) { + this.readBlobContainerClient = 
readBlobContainerClient; + this.writeBlobContainerClient = writeBlobContainerClient; this.journalNamePrefix = journalNamePrefix; this.lineLimit = lineLimit; this.writeAccessController = writeAccessController; } - public AzureJournalFile(CloudBlobDirectory directory, String journalNamePrefix, WriteAccessController writeAccessController) { - this(directory, journalNamePrefix, writeAccessController, JOURNAL_LINE_LIMIT); + public AzureJournalFile(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, String journalNamePrefix, WriteAccessController writeAccessController) { + this(readBlobContainerClient, writeBlobContainerClient, journalNamePrefix, writeAccessController, JOURNAL_LINE_LIMIT); } @Override public JournalFileReader openJournalReader() throws IOException { - return new CombinedReader(getJournalBlobs()); + return new CombinedReader(readBlobContainerClient, getJournalBlobs()); } @Override @@ -99,26 +97,25 @@ private String getJournalFileName(int index) { return String.format("%s.%03d", journalNamePrefix, index); } - private List getJournalBlobs() throws IOException { + private List getJournalBlobs() throws IOException { try { - List result = new ArrayList<>(); - for (ListBlobItem b : directory.listBlobs(journalNamePrefix)) { - if (b instanceof CloudAppendBlob) { - result.add((CloudAppendBlob) b); - } else { - log.warn("Invalid blob type: {} {}", b.getUri(), b.getClass()); - } - } - result.sort(Comparator.comparing(AzureUtilities::getName).reversed()); + ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); + listBlobsOptions.setPrefix(journalNamePrefix); + List result = readBlobContainerClient.listBlobs(listBlobsOptions, null).stream() + .filter(blobItem -> blobItem.getProperties().getBlobType().equals(BlobType.APPEND_BLOB)) + .collect(Collectors.toList()); + result.sort(Comparator.comparing(AzureUtilities::getName).reversed()); return result; - } catch (URISyntaxException | StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } private static class AzureJournalReader implements JournalFileReader { - private final CloudBlob blob; + private final BlobContainerClient blobContainerClient; + + private final BlobItem blob; private ReverseFileReader reader; @@ -126,7 +123,8 @@ private static class AzureJournalReader implements JournalFileReader { private boolean firstLineReturned; - private AzureJournalReader(CloudBlob blob) { + private AzureJournalReader(BlobContainerClient blobContainerClient, BlobItem blob) { + this.blobContainerClient = blobContainerClient; this.blob = blob; } @@ -135,19 +133,19 @@ public String readLine() throws IOException { if (reader == null) { try { if (!metadataFetched) { - blob.downloadAttributes(); - metadataFetched = true; Map metadata = CaseInsensitiveKeysMapAccess.convert(blob.getMetadata()); + metadataFetched = true; if (metadata.containsKey("lastEntry")) { firstLineReturned = true; return metadata.get("lastEntry"); } } - reader = new ReverseFileReader(blob); + reader = new ReverseFileReader(blobContainerClient, blob); if (firstLineReturned) { - while("".equals(reader.readLine())); // the first line was already returned, let's fast-forward it + while ("".equals(reader.readLine())) + ; // the first line was already returned, let's fast-forward it } - } catch (StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } @@ -161,33 +159,24 @@ public void close() throws IOException { private class AzureJournalWriter implements JournalFileWriter { - private 
CloudAppendBlob currentBlob; + private AppendBlobClient currentBlob; private int lineCount; - private final BlobRequestOptions writeOptimisedBlobRequestOptions; - public AzureJournalWriter() throws IOException { - writeOptimisedBlobRequestOptions = AzureRequestOptions.optimiseForWriteOperations(directory.getServiceClient().getDefaultRequestOptions()); - - List blobs = getJournalBlobs(); + List blobs = getJournalBlobs(); if (blobs.isEmpty()) { try { - currentBlob = directory.getAppendBlobReference(getJournalFileName(1)); - currentBlob.createOrReplace(); - currentBlob.downloadAttributes(); - } catch (URISyntaxException | StorageException e) { + currentBlob = writeBlobContainerClient.getBlobClient(getJournalFileName(1)).getAppendBlobClient(); + currentBlob.createIfNotExists(); + } catch (BlobStorageException e) { throw new IOException(e); } } else { - currentBlob = blobs.get(0); + currentBlob = writeBlobContainerClient.getBlobClient(blobs.get(0).getName()).getAppendBlobClient(); } - try { - currentBlob.downloadAttributes(); - } catch (StorageException e) { - throw new IOException(e); - } - String lc = currentBlob.getMetadata().get("lineCount"); + + String lc = currentBlob.getProperties().getMetadata().get("lineCount"); lineCount = lc == null ? 0 : Integer.parseInt(lc); } @@ -196,12 +185,11 @@ public void truncate() throws IOException { try { writeAccessController.checkWritingAllowed(); - for (CloudAppendBlob cloudAppendBlob : getJournalBlobs()) { - cloudAppendBlob.delete(DeleteSnapshotsOption.NONE, null, writeOptimisedBlobRequestOptions, null); + for (BlobItem blobItem : getJournalBlobs()) { + writeBlobContainerClient.getBlobClient(blobItem.getName()).delete(); } - createNextFile(0); - } catch (StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } @@ -222,9 +210,9 @@ public void batchWriteLines(List lines) throws IOException { List firstBlock = lines.subList(0, firstBlockSize); List> remainingBlocks = partition(lines.subList(firstBlockSize, lines.size()), lineLimit); List> allBlocks = ImmutableList.>builder() - .addAll(firstBlock.isEmpty() ? ImmutableList.of() : ImmutableList.of(firstBlock)) - .addAll(remainingBlocks) - .build(); + .addAll(firstBlock.isEmpty() ? 
ImmutableList.of() : ImmutableList.of(firstBlock)) + .addAll(remainingBlocks) + .build(); for (List entries : allBlocks) { if (lineCount >= lineLimit) { @@ -236,12 +224,15 @@ public void batchWriteLines(List lines) throws IOException { text.append(line).append("\n"); } try { - currentBlob.appendText(text.toString(), null, null, writeOptimisedBlobRequestOptions, null); - currentBlob.getMetadata().put("lastEntry", entries.get(entries.size() - 1)); + currentBlob.appendBlock(new ByteArrayInputStream(text.toString().getBytes()), text.length()); + Map metadata = new HashMap<>(currentBlob.getProperties().getMetadata()); + metadata.put("lastEntry", entries.get(entries.size() - 1)); + lineCount += entries.size(); - currentBlob.getMetadata().put("lineCount", Integer.toString(lineCount)); - currentBlob.uploadMetadata(null, writeOptimisedBlobRequestOptions, null); - } catch (StorageException e) { + + metadata.put("lineCount", Integer.toString(lineCount)); + currentBlob.setMetadata(metadata); + } catch (BlobStorageException e) { throw new IOException(e); } } @@ -249,17 +240,17 @@ public void batchWriteLines(List lines) throws IOException { private void createNextFile(int suffix) throws IOException { try { - currentBlob = directory.getAppendBlobReference(getJournalFileName(suffix + 1)); - currentBlob.createOrReplace(null, writeOptimisedBlobRequestOptions, null); + currentBlob = writeBlobContainerClient.getBlobClient(getJournalFileName(suffix + 1)).getAppendBlobClient(); + currentBlob.createIfNotExists(); lineCount = 0; - } catch (URISyntaxException | StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } private int parseCurrentSuffix() { String name = AzureUtilities.getName(currentBlob); - Pattern pattern = Pattern.compile(Pattern.quote(journalNamePrefix) + "\\.(\\d+)" ); + Pattern pattern = Pattern.compile(Pattern.quote(journalNamePrefix) + "\\.(\\d+)"); Matcher matcher = pattern.matcher(name); int parsedSuffix; if (matcher.find()) { @@ -289,8 +280,8 @@ private static class CombinedReader implements JournalFileReader { private JournalFileReader currentReader; - private CombinedReader(List blobs) { - readers = blobs.stream().map(AzureJournalReader::new).iterator(); + private CombinedReader(BlobContainerClient blobContainerClient, List blobs) { + readers = blobs.stream().map(blobItem -> new AzureJournalReader(blobContainerClient, blobItem)).iterator(); } @Override diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFile.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFile.java index aae72c12003..35c2accd890 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFile.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFile.java @@ -16,8 +16,9 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlockBlob; +import com.azure.core.util.BinaryData; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.specialized.BlockBlobClient; import org.apache.jackrabbit.oak.segment.spi.persistence.ManifestFile; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -31,9 +32,9 @@ public class AzureManifestFile implements ManifestFile { private static final Logger log = LoggerFactory.getLogger(AzureManifestFile.class); - private final CloudBlockBlob manifestBlob; + private 
final BlockBlobClient manifestBlob; - public AzureManifestFile(CloudBlockBlob manifestBlob) { + public AzureManifestFile(BlockBlobClient manifestBlob) { this.manifestBlob = manifestBlob; } @@ -41,7 +42,7 @@ public AzureManifestFile(CloudBlockBlob manifestBlob) { public boolean exists() { try { return manifestBlob.exists(); - } catch (StorageException e) { + } catch (BlobStorageException e) { log.error("Can't check if the manifest exists", e); return false; } @@ -51,14 +52,12 @@ public boolean exists() { public Properties load() throws IOException { Properties properties = new Properties(); if (exists()) { - long length = manifestBlob.getProperties().getLength(); - byte[] data = new byte[(int) length]; try { - manifestBlob.downloadToByteArray(data, 0); - } catch (StorageException e) { + byte[] data = manifestBlob.downloadContent().toBytes(); + properties.load(new ByteArrayInputStream(data)); + } catch (BlobStorageException e) { throw new IOException(e); } - properties.load(new ByteArrayInputStream(data)); } return properties; } @@ -70,8 +69,8 @@ public void save(Properties properties) throws IOException { byte[] data = bos.toByteArray(); try { - manifestBlob.uploadFromByteArray(data, 0, data.length); - } catch (StorageException e) { + manifestBlob.upload(BinaryData.fromBytes(data), true); + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistence.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistence.java index dc41fbff1e4..2bb687a12da 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistence.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistence.java @@ -16,69 +16,60 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import java.io.IOException; -import java.net.URISyntaxException; -import java.nio.file.Paths; -import java.util.Date; -import java.util.EnumSet; -import java.util.concurrent.TimeUnit; - -import com.microsoft.azure.storage.OperationContext; -import com.microsoft.azure.storage.RequestCompletedEvent; -import com.microsoft.azure.storage.StorageEvent; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.BlobListingDetails; -import com.microsoft.azure.storage.blob.CloudAppendBlob; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.CloudBlockBlob; -import com.microsoft.azure.storage.blob.ListBlobItem; -import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptions; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.specialized.AppendBlobClient; +import com.azure.storage.blob.specialized.BlobLeaseClient; +import com.azure.storage.blob.specialized.BlobLeaseClientBuilder; +import com.azure.storage.blob.specialized.BlockBlobClient; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor; import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitor; -import org.apache.jackrabbit.oak.segment.spi.persistence.GCJournalFile; -import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFile; -import 
org.apache.jackrabbit.oak.segment.spi.persistence.ManifestFile; -import org.apache.jackrabbit.oak.segment.spi.persistence.RepositoryLock; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; +import org.apache.jackrabbit.oak.segment.spi.persistence.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; + public class AzurePersistence implements SegmentNodeStorePersistence { private static final Logger log = LoggerFactory.getLogger(AzurePersistence.class); - protected final CloudBlobDirectory segmentstoreDirectory; + protected final BlobContainerClient readBlobContainerClient; + + protected BlobContainerClient writeBlobContainerClient; + + protected final String rootPrefix; + + protected AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy; protected WriteAccessController writeAccessController = new WriteAccessController(); - public AzurePersistence(CloudBlobDirectory segmentStoreDirectory) { - this.segmentstoreDirectory = segmentStoreDirectory; + public AzurePersistence(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, String rootPrefix) { + this(readBlobContainerClient, writeBlobContainerClient, rootPrefix, null); + } - AzureRequestOptions.applyDefaultRequestOptions(segmentStoreDirectory.getServiceClient().getDefaultRequestOptions()); + public AzurePersistence(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, String rootPrefix, AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy) { + this.readBlobContainerClient = readBlobContainerClient; + this.writeBlobContainerClient = writeBlobContainerClient; + this.azureHttpRequestLoggingPolicy = azureHttpRequestLoggingPolicy; + this.rootPrefix = rootPrefix; } @Override public SegmentArchiveManager createArchiveManager(boolean mmap, boolean offHeapAccess, IOMonitor ioMonitor, FileStoreMonitor fileStoreMonitor, RemoteStoreMonitor remoteStoreMonitor) { attachRemoteStoreMonitor(remoteStoreMonitor); - return new AzureArchiveManager(segmentstoreDirectory, ioMonitor, fileStoreMonitor, writeAccessController); + return new AzureArchiveManager(readBlobContainerClient, writeBlobContainerClient, rootPrefix, ioMonitor, fileStoreMonitor, writeAccessController); } @Override public boolean segmentFilesExist() { try { - for (ListBlobItem i : segmentstoreDirectory.listBlobs(null, false, EnumSet.noneOf(BlobListingDetails.class), null, null)) { - if (i instanceof CloudBlobDirectory) { - CloudBlobDirectory dir = (CloudBlobDirectory) i; - String name = Paths.get(dir.getPrefix()).getFileName().toString(); - if (name.endsWith(".tar")) { - return true; - } - } - } - return false; - } catch (StorageException | URISyntaxException e) { + return readBlobContainerClient.listBlobsByHierarchy(rootPrefix + "/").stream() + .filter(BlobItem::isPrefix) + .anyMatch(blobItem -> blobItem.getName().endsWith(".tar") || blobItem.getName().endsWith(".tar/")); + } catch (BlobStorageException e) { log.error("Can't check if the segment archives exists", e); return false; } @@ -86,7 +77,7 @@ public boolean segmentFilesExist() { @Override public JournalFile getJournalFile() { - return new AzureJournalFile(segmentstoreDirectory, "journal.log", writeAccessController); + return new AzureJournalFile(readBlobContainerClient, writeBlobContainerClient, rootPrefix + "/journal.log", writeAccessController); } @Override @@ -101,55 +92,36 @@ public ManifestFile 
getManifestFile() throws IOException { @Override public RepositoryLock lockRepository() throws IOException { - return new AzureRepositoryLock(getBlockBlob("repo.lock"), () -> { + BlockBlobClient blockBlobClient = getBlockBlob("repo.lock"); + BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(blockBlobClient).buildClient(); + return new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> { log.warn("Lost connection to the Azure. The client will be closed."); // TODO close the connection }, writeAccessController).lock(); } - private CloudBlockBlob getBlockBlob(String path) throws IOException { + private BlockBlobClient getBlockBlob(String path) throws IOException { try { - return segmentstoreDirectory.getBlockBlobReference(path); - } catch (URISyntaxException | StorageException e) { + return readBlobContainerClient.getBlobClient(rootPrefix + "/" + path).getBlockBlobClient(); + } catch (BlobStorageException e) { throw new IOException(e); } } - private CloudAppendBlob getAppendBlob(String path) throws IOException { + private AppendBlobClient getAppendBlob(String path) throws IOException { try { - return segmentstoreDirectory.getAppendBlobReference(path); - } catch (URISyntaxException | StorageException e) { + return readBlobContainerClient.getBlobClient(rootPrefix + "/" + path).getAppendBlobClient(); + } catch (BlobStorageException e) { throw new IOException(e); } } - private static void attachRemoteStoreMonitor(RemoteStoreMonitor remoteStoreMonitor) { - OperationContext.getGlobalRequestCompletedEventHandler().addListener(new StorageEvent() { - - @Override - public void eventOccurred(RequestCompletedEvent e) { - Date startDate = e.getRequestResult().getStartDate(); - Date stopDate = e.getRequestResult().getStopDate(); - - if (startDate != null && stopDate != null) { - long requestDuration = stopDate.getTime() - startDate.getTime(); - remoteStoreMonitor.requestDuration(requestDuration, TimeUnit.MILLISECONDS); - } - - Exception exception = e.getRequestResult().getException(); - - if (exception == null) { - remoteStoreMonitor.requestCount(); - } else { - remoteStoreMonitor.requestError(); - } - } - - }); + private void attachRemoteStoreMonitor(RemoteStoreMonitor remoteStoreMonitor) { + if (azureHttpRequestLoggingPolicy != null) { + azureHttpRequestLoggingPolicy.setRemoteStoreMonitor(remoteStoreMonitor); + } } - public CloudBlobDirectory getSegmentstoreDirectory() { - return segmentstoreDirectory; + public BlobContainerClient getReadBlobContainerClient() { + return readBlobContainerClient; } public void setWriteAccessController(WriteAccessController writeAccessController) { diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java new file mode 100644 index 00000000000..e990e1e4867 --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java @@ -0,0 +1,197 @@ +package org.apache.jackrabbit.oak.segment.azure; + +import com.azure.identity.ClientSecretCredential; +import com.azure.identity.ClientSecretCredentialBuilder; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobServiceClient; +import com.azure.storage.blob.BlobServiceClientBuilder; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.common.policy.RequestRetryOptions; +import org.apache.commons.lang3.StringUtils; +import
org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptions; +import org.apache.jackrabbit.oak.segment.azure.util.Environment; +import org.jetbrains.annotations.NotNull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.*; + +public class AzurePersistenceManager { + + private static final Logger log = LoggerFactory.getLogger(AzurePersistenceManager.class); + + private AzurePersistenceManager() { + } + + public static AzurePersistence createAzurePersistenceFrom(@NotNull String accountName, @NotNull String containerName, @NotNull String rootPrefix, @NotNull Environment environment) throws IOException { + final String clientId = environment.getVariable(AZURE_CLIENT_ID); + final String clientSecret = environment.getVariable(AZURE_CLIENT_SECRET); + final String tenantId = environment.getVariable(AZURE_TENANT_ID); + + if (StringUtils.isNoneBlank(clientId, clientSecret, tenantId)) { + try { + return createPersistenceFromServicePrincipalCredentials(accountName, containerName, rootPrefix, clientId, clientSecret, tenantId, false, false); + } catch (IllegalArgumentException | StringIndexOutOfBoundsException e) { + log.error("Error occurred while connecting to Azure Storage using service principals: ", e); + throw new IllegalArgumentException( + "Could not connect to the Azure Storage. Please verify if AZURE_CLIENT_ID, AZURE_CLIENT_SECRET and AZURE_TENANT_ID environment variables are correctly set!"); + } + } + + log.warn("AZURE_CLIENT_ID, AZURE_CLIENT_SECRET and AZURE_TENANT_ID environment variables empty or missing. Switching to authentication with AZURE_SECRET_KEY."); + + String key = environment.getVariable(AZURE_SECRET_KEY); + try { + return createPersistenceFromAccessKey(accountName, containerName, key, null, rootPrefix, false, false); + } catch (IllegalArgumentException | StringIndexOutOfBoundsException e) { + log.error("Error occurred while connecting to Azure Storage using secret key: ", e); + throw new IllegalArgumentException( + "Could not connect to the Azure Storage. 
Please verify if AZURE_SECRET_KEY environment variable is correctly set!"); + } + } + + public static AzurePersistence createAzurePersistenceFrom(Configuration configuration) throws IOException { + if (!StringUtils.isBlank(configuration.connectionURL())) { + return createPersistenceFromConnectionURL(configuration); + } + if (!StringUtils.isAnyBlank(configuration.clientId(), configuration.clientSecret(), configuration.tenantId())) { + return createPersistenceFromServicePrincipalCredentials(configuration); + } + if (!StringUtils.isBlank(configuration.sharedAccessSignature())) { + return createPersistenceFromSasUri(configuration); + } + return createPersistenceFromAccessKey(configuration); + } + + private static AzurePersistence createPersistenceFromAccessKey(Configuration configuration) throws IOException { + return createPersistenceFromAccessKey(configuration.accountName(), configuration.containerName(), configuration.accessKey(), configuration.blobEndpoint(), configuration.rootPath(), configuration.enableSecondaryLocation(), true); + } + + private static AzurePersistence createPersistenceFromAccessKey(String accountName, String containerName, String accessKey, String blobEndpoint, String rootPrefix, boolean enableSecondaryLocation, boolean createContainer) throws IOException { + StringBuilder connectionString = new StringBuilder(); + connectionString.append("DefaultEndpointsProtocol=https;"); + connectionString.append("AccountName=").append(accountName).append(';'); + connectionString.append("AccountKey=").append(accessKey).append(';'); + if (!StringUtils.isBlank(blobEndpoint)) { + connectionString.append("BlobEndpoint=").append(blobEndpoint).append(';'); + } + return createAzurePersistence(connectionString.toString(), accountName, containerName, rootPrefix, enableSecondaryLocation, createContainer); + } + + @NotNull + private static AzurePersistence createPersistenceFromConnectionURL(Configuration configuration) throws IOException { + return createAzurePersistence(configuration.connectionURL(), configuration, true); + } + + private static AzurePersistence createPersistenceFromSasUri(Configuration configuration) throws IOException { + StringBuilder connectionString = new StringBuilder(); + connectionString.append("DefaultEndpointsProtocol=https;"); + connectionString.append("AccountName=").append(configuration.accountName()).append(';'); + connectionString.append("SharedAccessSignature=").append(configuration.sharedAccessSignature()).append(';'); + if (!StringUtils.isBlank(configuration.blobEndpoint())) { + connectionString.append("BlobEndpoint=").append(configuration.blobEndpoint()).append(';'); + } + return createAzurePersistence(connectionString.toString(), configuration, false); + } + + + @NotNull + private static AzurePersistence createPersistenceFromServicePrincipalCredentials(Configuration configuration) throws IOException { + String path = normalizePath(configuration.rootPath()); + return createPersistenceFromServicePrincipalCredentials(configuration.accountName(), configuration.containerName(), path, configuration.clientId(), configuration.clientSecret(), configuration.tenantId(), configuration.enableSecondaryLocation(), true); + } + + private static AzurePersistence createPersistenceFromServicePrincipalCredentials(String accountName, String containerName, String rootPrefix, String clientId, String clientSecret, String tenantId, boolean enableSecondaryLocation, boolean createContainer) throws IOException { + AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy = new 
AzureHttpRequestLoggingPolicy(); + + ClientSecretCredential clientSecretCredential = new ClientSecretCredentialBuilder() + .clientId(clientId) + .clientSecret(clientSecret) + .tenantId(tenantId) + .build(); + + RequestRetryOptions retryOptions = readRequestRetryOptions(enableSecondaryLocation, accountName); + BlobContainerClient blobContainerClient = getBlobContainerClient(accountName, containerName, retryOptions, azureHttpRequestLoggingPolicy, clientSecretCredential); + + RequestRetryOptions writeRetryOptions = AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations(); + BlobContainerClient writeContainerClient = getBlobContainerClient(accountName, containerName, writeRetryOptions, azureHttpRequestLoggingPolicy, clientSecretCredential); + + if (createContainer) { + blobContainerClient.createIfNotExists(); + } + + return new AzurePersistence(blobContainerClient, writeContainerClient, rootPrefix, azureHttpRequestLoggingPolicy); + } + + @NotNull + private static AzurePersistence createAzurePersistence(String connectionString, Configuration configuration, boolean createContainer) throws IOException { + String path = normalizePath(configuration.rootPath()); + return createAzurePersistence(connectionString, configuration.accountName(), configuration.containerName(), path, configuration.enableSecondaryLocation(), createContainer); + } + + @NotNull + private static AzurePersistence createAzurePersistence(String connectionString, String accountName, String containerName, String rootPrefix, boolean enableSecondaryLocation, boolean createContainer) throws IOException { + try { + AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy = new AzureHttpRequestLoggingPolicy(); + + RequestRetryOptions retryOptions = readRequestRetryOptions(enableSecondaryLocation, accountName); + BlobContainerClient blobContainerClient = getBlobContainerClient(accountName, containerName, retryOptions, azureHttpRequestLoggingPolicy, connectionString); + + RequestRetryOptions writeRetryOptions = AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations(); + BlobContainerClient writeBlobContainerClient = getBlobContainerClient(accountName, containerName, writeRetryOptions, azureHttpRequestLoggingPolicy, connectionString); + + if (createContainer) { + blobContainerClient.createIfNotExists(); + } + + return new AzurePersistence(blobContainerClient, writeBlobContainerClient, rootPrefix, azureHttpRequestLoggingPolicy); + } catch (BlobStorageException e) { + throw new IOException(e); + } + } + + private static BlobContainerClient getBlobContainerClient(String accountName, String containerName, RequestRetryOptions requestRetryOptions, AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy, String connectionString) { + BlobServiceClient blobServiceClient = blobServiceClientBuilder(accountName, requestRetryOptions, azureHttpRequestLoggingPolicy) + .connectionString(connectionString) + .buildClient(); + + return blobServiceClient.getBlobContainerClient(containerName); + } + + private static BlobContainerClient getBlobContainerClient(String accountName, String containerName, RequestRetryOptions requestRetryOptions, AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy, ClientSecretCredential clientSecretCredential) { + BlobServiceClient blobServiceClient = blobServiceClientBuilder(accountName, requestRetryOptions, azureHttpRequestLoggingPolicy) + .credential(clientSecretCredential) + .buildClient(); + + return blobServiceClient.getBlobContainerClient(containerName); + } + + private static 
BlobServiceClientBuilder blobServiceClientBuilder(String accountName, RequestRetryOptions requestRetryOptions, AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy) { + String endpoint = String.format("https://%s.blob.core.windows.net", accountName); + + return new BlobServiceClientBuilder() + .endpoint(endpoint) + .addPolicy(azureHttpRequestLoggingPolicy) + .retryOptions(requestRetryOptions); + } + + private static RequestRetryOptions readRequestRetryOptions(boolean enableSecondaryLocation, String accountName) { + RequestRetryOptions retryOptions = AzureRequestOptions.getRetryOptionsDefault(); + if (enableSecondaryLocation) { + String endpointSecondaryRegion = String.format("https://%s-secondary.blob.core.windows.net", accountName); + retryOptions = AzureRequestOptions.getRetryOptionsDefault(endpointSecondaryRegion); + } + return retryOptions; + } + + @NotNull + private static String normalizePath(@NotNull String rootPath) { + if (!rootPath.isEmpty() && rootPath.charAt(0) == '/') { + return rootPath.substring(1); + } + return rootPath; + } + +} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLock.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLock.java index 01a24511f75..e60f94d3802 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLock.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLock.java @@ -16,20 +16,19 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.AccessCondition; -import com.microsoft.azure.storage.Constants; -import com.microsoft.azure.storage.RetryNoRetry; -import com.microsoft.azure.storage.StorageErrorCode; -import com.microsoft.azure.storage.StorageErrorCodeStrings; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.BlobRequestOptions; -import com.microsoft.azure.storage.blob.CloudBlockBlob; +import com.azure.core.http.RequestConditions; +import com.azure.core.util.Context; +import com.azure.storage.blob.models.BlobErrorCode; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.specialized.BlobLeaseClient; +import com.azure.storage.blob.specialized.BlockBlobClient; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.persistence.RepositoryLock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; +import java.time.Duration; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -53,7 +52,9 @@ public class AzureRepositoryLock implements RepositoryLock { private final Runnable shutdownHook; - private final CloudBlockBlob blob; + private final BlockBlobClient blockBlobClient; + + private final BlobLeaseClient leaseClient; private final ExecutorService executor; @@ -65,13 +66,14 @@ public class AzureRepositoryLock implements RepositoryLock { private volatile boolean doUpdate; - public AzureRepositoryLock(CloudBlockBlob blob, Runnable shutdownHook, WriteAccessController writeAccessController) { - this(blob, shutdownHook, writeAccessController, TIMEOUT_SEC); + public AzureRepositoryLock(BlockBlobClient blockBlobClient, BlobLeaseClient leaseClient, Runnable shutdownHook, WriteAccessController writeAccessController) { + this(blockBlobClient, leaseClient, shutdownHook, writeAccessController, TIMEOUT_SEC); } - 
public AzureRepositoryLock(CloudBlockBlob blob, Runnable shutdownHook, WriteAccessController writeAccessController, int timeoutSec) { + public AzureRepositoryLock(BlockBlobClient blockBlobClient, BlobLeaseClient leaseClient, Runnable shutdownHook, WriteAccessController writeAccessController, int timeoutSec) { this.shutdownHook = shutdownHook; - this.blob = blob; + this.blockBlobClient = blockBlobClient; + this.leaseClient = leaseClient; this.executor = Executors.newSingleThreadExecutor(); this.timeoutSec = timeoutSec; this.writeAccessController = writeAccessController; @@ -87,13 +89,13 @@ public AzureRepositoryLock lock() throws IOException { Exception ex = null; do { try { - blob.openOutputStream().close(); + blockBlobClient.getBlobOutputStream().close(); log.info("{} = {}", LEASE_DURATION_PROP, leaseDuration); log.info("{} = {}", RENEWAL_INTERVAL_PROP, renewalInterval); log.info("{} = {}", TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, timeToWaitBeforeWriteBlock); - leaseId = blob.acquireLease(leaseDuration, null); + leaseId = leaseClient.acquireLease(leaseDuration); writeAccessController.enableWriting(); log.info("Acquired lease {}", leaseId); } catch (Exception e) { @@ -128,11 +130,7 @@ private void refreshLease() { long timeSinceLastUpdate = (System.currentTimeMillis() - lastUpdate) / 1000; try { if (timeSinceLastUpdate > renewalInterval) { - - BlobRequestOptions requestOptions = new BlobRequestOptions(); - requestOptions.setMaximumExecutionTimeInMs(LEASE_RENEWAL_TIMEOUT_MS); - requestOptions.setRetryPolicyFactory(new RetryNoRetry()); - blob.renewLease(AccessCondition.generateLeaseCondition(leaseId), requestOptions, null); + leaseId = leaseClient.renewLeaseWithResponse((RequestConditions) null, Duration.ofMillis(LEASE_RENEWAL_TIMEOUT_MS), Context.NONE).getValue(); writeAccessController.enableWriting(); lastUpdate = System.currentTimeMillis(); @@ -144,14 +142,14 @@ private void refreshLease() { writeAccessController.disableWriting(); } - if (e instanceof StorageException) { - StorageException storageException = (StorageException) e; - if (Set.of(StorageErrorCodeStrings.OPERATION_TIMED_OUT , StorageErrorCode.SERVICE_INTERNAL_ERROR , StorageErrorCodeStrings.SERVER_BUSY , StorageErrorCodeStrings.INTERNAL_ERROR).contains(storageException.getErrorCode())) { + if (e instanceof BlobStorageException) { + BlobStorageException storageException = (BlobStorageException) e; + if (Set.of(BlobErrorCode.OPERATION_TIMED_OUT, + BlobErrorCode.SERVER_BUSY, + BlobErrorCode.INTERNAL_ERROR).contains(storageException.getErrorCode())) { log.warn("Could not renew the lease due to the operation timeout or service unavailability. Retry in progress ...", e); - } else if (storageException.getHttpStatusCode() == Constants.HeaderConstants.HTTP_UNUSED_306) { + // SDK v12 has no named constant for HTTP 306 (v8's Constants.HeaderConstants.HTTP_UNUSED_306), so the raw status code is matched here + } else if (storageException.getStatusCode() == 306) { log.warn("Client side error. Retry in progress ...", e); } else { log.warn("Could not renew lease due to storage exception. Retry in progress ... 
", e); @@ -186,12 +184,13 @@ public void unlock() throws IOException { private void releaseLease() throws IOException { try { - blob.releaseLease(AccessCondition.generateLeaseCondition(leaseId)); - blob.delete(); + leaseClient.releaseLease(); + blockBlobClient.delete(); log.info("Released lease {}", leaseId); leaseId = null; - } catch (StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } + } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveReader.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveReader.java index e6c636e9345..b5566f2f8c1 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveReader.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveReader.java @@ -16,33 +16,38 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.readBufferFully; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.ListBlobsOptions; +import com.azure.storage.blob.specialized.BlockBlobClient; +import org.apache.jackrabbit.oak.commons.Buffer; +import org.apache.jackrabbit.oak.segment.remote.AbstractRemoteSegmentArchiveReader; +import org.apache.jackrabbit.oak.segment.remote.RemoteSegmentArchiveEntry; +import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; import java.io.File; import java.io.IOException; -import java.net.URISyntaxException; import java.util.Map; import java.util.UUID; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlob; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.CloudBlockBlob; - -import org.apache.jackrabbit.oak.commons.Buffer; -import org.apache.jackrabbit.oak.segment.remote.AbstractRemoteSegmentArchiveReader; -import org.apache.jackrabbit.oak.segment.remote.RemoteSegmentArchiveEntry; -import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; +import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.readBufferFully; public class AzureSegmentArchiveReader extends AbstractRemoteSegmentArchiveReader { - private final CloudBlobDirectory archiveDirectory; + private final BlobContainerClient blobContainerClient; private final long length; - AzureSegmentArchiveReader(CloudBlobDirectory archiveDirectory, IOMonitor ioMonitor) throws IOException { + private final String archiveName; + + private final String archivePath; + + AzureSegmentArchiveReader(BlobContainerClient blobContainerClient, String rootPrefix, String archiveName, IOMonitor ioMonitor) throws IOException { super(ioMonitor); - this.archiveDirectory = archiveDirectory; + this.blobContainerClient = blobContainerClient; + this.archiveName = archiveName; + this.archivePath = String.format("%s/%s", rootPrefix, archiveName); this.length = computeArchiveIndexAndLength(); } @@ -53,19 +58,21 @@ public long length() { @Override public String getName() { - return AzureUtilities.getName(archiveDirectory); + return archiveName; } @Override protected long computeArchiveIndexAndLength() throws IOException { long length = 0; - for (CloudBlob blob : AzureUtilities.getBlobs(archiveDirectory)) { + ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); + 
listBlobsOptions.setPrefix(archivePath + "/"); + for (BlobItem blob : AzureUtilities.getBlobs(blobContainerClient, listBlobsOptions)) { Map metadata = blob.getMetadata(); if (AzureBlobMetadata.isSegment(metadata)) { - RemoteSegmentArchiveEntry indexEntry = AzureBlobMetadata.toIndexEntry(metadata, (int) blob.getProperties().getLength()); + RemoteSegmentArchiveEntry indexEntry = AzureBlobMetadata.toIndexEntry(metadata, blob.getProperties().getContentLength().intValue()); index.put(new UUID(indexEntry.getMsb(), indexEntry.getLsb()), indexEntry); } - length += blob.getProperties().getLength(); + length += blob.getProperties().getContentLength(); } return length; @@ -73,7 +80,7 @@ protected long computeArchiveIndexAndLength() throws IOException { @Override protected void doReadSegmentToBuffer(String segmentFileName, Buffer buffer) throws IOException { - readBufferFully(getBlob(segmentFileName), buffer); + readBufferFully(getBlobClient(segmentFileName), buffer); } @Override @@ -83,28 +90,29 @@ protected Buffer doReadDataFile(String extension) throws IOException { @Override protected File archivePathAsFile() { - return new File(archiveDirectory.getUri().getPath()); + return new File(archivePath); } - private CloudBlockBlob getBlob(String name) throws IOException { + private BlockBlobClient getBlobClient(String name) throws IOException { try { - return archiveDirectory.getBlockBlobReference(name); - } catch (URISyntaxException | StorageException e) { + String fullName = String.format("%s/%s", archivePath, name); + return blobContainerClient.getBlobClient(fullName).getBlockBlobClient(); + } catch (BlobStorageException e) { throw new IOException(e); } } private Buffer readBlob(String name) throws IOException { try { - CloudBlockBlob blob = getBlob(name); + BlockBlobClient blob = getBlobClient(name); if (!blob.exists()) { return null; } - long length = blob.getProperties().getLength(); + long length = blob.getProperties().getBlobSize(); Buffer buffer = Buffer.allocate((int) length); AzureUtilities.readBufferFully(blob, buffer); return buffer; - } catch (StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java index e9bbbb322a9..e9b9f5d8a7a 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java @@ -16,51 +16,52 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.readBufferFully; -import static org.apache.jackrabbit.oak.segment.remote.RemoteUtilities.getSegmentFileName; -import static org.apache.jackrabbit.oak.segment.remote.RemoteUtilities.OFF_HEAP; - -import java.io.File; -import java.io.IOException; -import java.net.URISyntaxException; -import java.util.concurrent.TimeUnit; - -import com.microsoft.azure.storage.blob.BlobRequestOptions; +import com.azure.core.util.BinaryData; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.specialized.BlockBlobClient; import org.apache.jackrabbit.guava.common.base.Stopwatch; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; 
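[Editor's note] For orientation, here is a minimal, self-contained sketch of the v12 listing idiom the archive reader above relies on. It is illustrative only, not part of the patch; the class and method names are invented for the example. With CloudBlobDirectory gone, the blobs making up an archive are found by flat-listing the container under a name prefix:

import com.azure.storage.blob.BlobContainerClient;
import com.azure.storage.blob.models.BlobItem;
import com.azure.storage.blob.models.BlobListDetails;
import com.azure.storage.blob.models.ListBlobsOptions;

final class ArchiveListingSketch {

    // Sums the sizes of all blobs under "<archivePath>/", mirroring computeArchiveIndexAndLength() above.
    static long totalArchiveLength(BlobContainerClient container, String archivePath) {
        ListBlobsOptions options = new ListBlobsOptions()
                .setPrefix(archivePath + "/")
                // In v12, flat listings return blob metadata only when explicitly requested.
                .setDetails(new BlobListDetails().setRetrieveMetadata(true));
        long length = 0;
        for (BlobItem blob : container.listBlobs(options, null)) { // null = no per-call timeout
            length += blob.getProperties().getContentLength();
        }
        return length;
    }
}

One detail worth verifying against the reader code above: if the listing options do not request metadata, BlobItem.getMetadata() may be null, which would affect the AzureBlobMetadata.isSegment(metadata) check in computeArchiveIndexAndLength().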
-import com.microsoft.azure.storage.blob.CloudBlockBlob; - import org.apache.jackrabbit.oak.commons.Buffer; -import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptions; -import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.azure.util.Retrier; import org.apache.jackrabbit.oak.segment.remote.AbstractRemoteSegmentArchiveWriter; import org.apache.jackrabbit.oak.segment.remote.RemoteSegmentArchiveEntry; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor; import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.TimeUnit; + +import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.readBufferFully; +import static org.apache.jackrabbit.oak.segment.remote.RemoteUtilities.OFF_HEAP; +import static org.apache.jackrabbit.oak.segment.remote.RemoteUtilities.getSegmentFileName; + public class AzureSegmentArchiveWriter extends AbstractRemoteSegmentArchiveWriter { - private final CloudBlobDirectory archiveDirectory; + private final BlobContainerClient blobContainerClient; + + private final String rootPrefix; + + private final String archiveName; private final Retrier retrier = Retrier.withParams( Integer.getInteger("azure.segment.archive.writer.retries.max", 16), Integer.getInteger("azure.segment.archive.writer.retries.intervalMs", 5000) ); - private final BlobRequestOptions writeOptimisedBlobRequestOptions; - - public AzureSegmentArchiveWriter(CloudBlobDirectory archiveDirectory, IOMonitor ioMonitor, FileStoreMonitor monitor, WriteAccessController writeAccessController) { + public AzureSegmentArchiveWriter(BlobContainerClient blobContainerClient, String rootPrefix, String archiveName, IOMonitor ioMonitor, FileStoreMonitor monitor, WriteAccessController writeAccessController) { super(ioMonitor, monitor); - this.archiveDirectory = archiveDirectory; + this.blobContainerClient = blobContainerClient; + this.rootPrefix = rootPrefix; + this.archiveName = archiveName; this.writeAccessController = writeAccessController; - this.writeOptimisedBlobRequestOptions = AzureRequestOptions.optimiseForWriteOperations(archiveDirectory.getServiceClient().getDefaultRequestOptions()); } @Override public String getName() { - return AzureUtilities.getName(archiveDirectory); + return archiveName; } @Override @@ -71,17 +72,16 @@ protected void doWriteArchiveEntry(RemoteSegmentArchiveEntry indexEntry, byte[] long msb = indexEntry.getMsb(); long lsb = indexEntry.getLsb(); String segmentName = getSegmentFileName(indexEntry); - CloudBlockBlob blob = getBlob(segmentName); - ioMonitor.beforeSegmentWrite(new File(blob.getName()), msb, lsb, size); + BlockBlobClient blob = getBlockBlobClient(segmentName); + ioMonitor.beforeSegmentWrite(new File(blob.getBlobName()), msb, lsb, size); Stopwatch stopwatch = Stopwatch.createStarted(); try { + blob.upload(BinaryData.fromBytes(Arrays.copyOfRange(data, offset, offset + size)), true); blob.setMetadata(AzureBlobMetadata.toSegmentMetadata(indexEntry)); - blob.uploadFromByteArray(data, offset, size, null, writeOptimisedBlobRequestOptions, null); - blob.uploadMetadata(null, writeOptimisedBlobRequestOptions, null); - } catch (StorageException e) { + } catch (BlobStorageException e) { throw new IOException(e); } - ioMonitor.afterSegmentWrite(new File(blob.getName()), msb, lsb, size, stopwatch.elapsed(TimeUnit.NANOSECONDS)); 
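[Editor's note] The writer's doWriteArchiveEntry() above uses a two-request idiom: upload the segment bytes, then attach the metadata. A minimal sketch of that idiom under the same hedge (illustrative names, not part of the patch):

import com.azure.core.util.BinaryData;
import com.azure.storage.blob.specialized.BlockBlobClient;

import java.util.Arrays;
import java.util.Map;

final class SegmentUploadSketch {

    // Uploads data[offset, offset + size) and then sets metadata, mirroring doWriteArchiveEntry() above.
    static void writeSegment(BlockBlobClient blob, byte[] data, int offset, int size, Map<String, String> metadata) {
        // 'true' overwrites an existing blob with the same name.
        blob.upload(BinaryData.fromBytes(Arrays.copyOfRange(data, offset, offset + size)), true);
        // Metadata travels in a separate HTTP request here, so it becomes visible only after the upload succeeds.
        blob.setMetadata(metadata);
    }
}

If metadata needs to land atomically with the bytes, the SDK also offers BlockBlobClient.uploadWithResponse(...) with BlockBlobSimpleUploadOptions.setMetadata(...), which carries the metadata in the same request at the cost of a more verbose call.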
+ ioMonitor.afterSegmentWrite(new File(blob.getBlobName()), msb, lsb, size, stopwatch.elapsed(TimeUnit.NANOSECONDS)); } @Override @@ -92,7 +92,7 @@ protected Buffer doReadArchiveEntry(RemoteSegmentArchiveEntry indexEntry) throw } else { buffer = Buffer.allocate(indexEntry.getLength()); } - readBufferFully(getBlob(getSegmentFileName(indexEntry)), buffer); + readBufferFully(getBlockBlobClient(getSegmentFileName(indexEntry)), buffer); return buffer; } @@ -102,8 +102,8 @@ protected void doWriteDataFile(byte[] data, String extension) throws IOException try { writeAccessController.checkWritingAllowed(); - getBlob(getName() + extension).uploadFromByteArray(data, 0, data.length, null, writeOptimisedBlobRequestOptions, null); - } catch (StorageException e) { + getBlockBlobClient(getName() + extension).upload(BinaryData.fromBytes(data), true); + } catch (BlobStorageException e) { throw new IOException(e); } }); @@ -115,8 +115,8 @@ protected void afterQueueClosed() throws IOException { try { writeAccessController.checkWritingAllowed(); - getBlob("closed").uploadFromByteArray(new byte[0], 0, 0, null, writeOptimisedBlobRequestOptions, null); - } catch (StorageException e) { + getBlockBlobClient("closed").upload(BinaryData.fromBytes(new byte[0]), true); + } catch (BlobStorageException e) { throw new IOException(e); } }); @@ -127,10 +127,11 @@ protected void afterQueueFlushed() { // do nothing } - private CloudBlockBlob getBlob(String name) throws IOException { + private BlockBlobClient getBlockBlobClient(String name) throws IOException { + String blobFullName = String.format("%s/%s/%s", rootPrefix, archiveName, name); try { - return archiveDirectory.getBlockBlobReference(name); - } catch (URISyntaxException | StorageException e) { + return blobContainerClient.getBlobClient(blobFullName).getBlockBlobClient(); + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreService.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreService.java index 24465ea720c..1465574d83d 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreService.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreService.java @@ -18,16 +18,9 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.LocationMode; -import com.microsoft.azure.storage.StorageCredentials; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.BlobRequestOptions; -import com.microsoft.azure.storage.blob.CloudBlobClient; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import org.apache.commons.lang3.StringUtils; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureSegmentStoreServiceV8; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; -import org.jetbrains.annotations.NotNull; import org.osgi.framework.ServiceRegistration; import org.osgi.service.component.ComponentContext; import org.osgi.service.component.annotations.Activate; @@ -38,16 +31,14 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; import java.util.Hashtable; import java.util.Objects; import static 
org.osgi.framework.Constants.SERVICE_PID; @Component( - configurationPolicy = ConfigurationPolicy.REQUIRE, - configurationPid = {Configuration.PID}) + configurationPolicy = ConfigurationPolicy.REQUIRE, + configurationPid = {Configuration.PID}) public class AzureSegmentStoreService { private static final Logger log = LoggerFactory.getLogger(AzureSegmentStoreService.class); @@ -57,21 +48,35 @@ public class AzureSegmentStoreService { public static final String DEFAULT_ROOT_PATH = "/oak"; public static final boolean DEFAULT_ENABLE_SECONDARY_LOCATION = false; - public static final String DEFAULT_ENDPOINT_SUFFIX = "core.windows.net"; private ServiceRegistration registration; - private static AzureStorageCredentialManager azureStorageCredentialManager; + + private final boolean useAzureSdkV12 = Boolean.getBoolean("segment.azure.v12.enabled"); + @Activate public void activate(ComponentContext context, Configuration config) throws IOException { - AzurePersistence persistence = createAzurePersistenceFrom(config); - registration = context.getBundleContext() - .registerService(SegmentNodeStorePersistence.class, persistence, new Hashtable() {{ - put(SERVICE_PID, String.format("%s(%s, %s)", AzurePersistence.class.getName(), config.accountName(), config.rootPath())); - if (!Objects.equals(config.role(), "")) { - put("role", config.role()); - } - }}); + if (useAzureSdkV12) { + log.info("Starting nodestore using Azure SDK 12"); + AzurePersistence persistence = AzurePersistenceManager.createAzurePersistenceFrom(config); + registration = context.getBundleContext() + .registerService(SegmentNodeStorePersistence.class, persistence, new Hashtable() {{ + put(SERVICE_PID, String.format("%s(%s, %s)", AzurePersistence.class.getName(), config.accountName(), config.rootPath())); + if (!Objects.equals(config.role(), "")) { + put("role", config.role()); + } + }}); + } else { + log.info("Starting nodestore using Azure SDK 8"); + AzurePersistenceV8 persistence = AzureSegmentStoreServiceV8.createAzurePersistenceFrom(config); + registration = context.getBundleContext() + .registerService(SegmentNodeStorePersistence.class, persistence, new Hashtable() {{ + put(SERVICE_PID, String.format("%s(%s, %s)", AzurePersistenceV8.class.getName(), config.accountName(), config.rootPath())); + if (!Objects.equals(config.role(), "")) { + put("role", config.role()); + } + }}); + } } @Deactivate @@ -80,99 +85,6 @@ public void deactivate() throws IOException { registration.unregister(); registration = null; } - if (azureStorageCredentialManager != null) { - azureStorageCredentialManager.close(); - } - } - - private static AzurePersistence createAzurePersistenceFrom(Configuration configuration) throws IOException { - if (!StringUtils.isBlank(configuration.connectionURL())) { - return createPersistenceFromConnectionURL(configuration); - } - if (!StringUtils.isAnyBlank(configuration.clientId(), configuration.clientSecret(), configuration.tenantId())) { - return createPersistenceFromServicePrincipalCredentials(configuration); - } - if (!StringUtils.isBlank(configuration.sharedAccessSignature())) { - return createPersistenceFromSasUri(configuration); - } - return createPersistenceFromAccessKey(configuration); - } - - private static AzurePersistence createPersistenceFromAccessKey(Configuration configuration) throws IOException { - StringBuilder connectionString = new StringBuilder(); - connectionString.append("DefaultEndpointsProtocol=https;"); - connectionString.append("AccountName=").append(configuration.accountName()).append(';'); - 
connectionString.append("AccountKey=").append(configuration.accessKey()).append(';'); - if (!StringUtils.isBlank(configuration.blobEndpoint())) { - connectionString.append("BlobEndpoint=").append(configuration.blobEndpoint()).append(';'); - } - return createAzurePersistence(connectionString.toString(), configuration, true); - } - - private static AzurePersistence createPersistenceFromSasUri(Configuration configuration) throws IOException { - StringBuilder connectionString = new StringBuilder(); - connectionString.append("DefaultEndpointsProtocol=https;"); - connectionString.append("AccountName=").append(configuration.accountName()).append(';'); - connectionString.append("SharedAccessSignature=").append(configuration.sharedAccessSignature()).append(';'); - if (!StringUtils.isBlank(configuration.blobEndpoint())) { - connectionString.append("BlobEndpoint=").append(configuration.blobEndpoint()).append(';'); - } - return createAzurePersistence(connectionString.toString(), configuration, false); - } - - @NotNull - private static AzurePersistence createPersistenceFromConnectionURL(Configuration configuration) throws IOException { - return createAzurePersistence(configuration.connectionURL(), configuration, true); - } - - @NotNull - private static AzurePersistence createPersistenceFromServicePrincipalCredentials(Configuration configuration) throws IOException { - azureStorageCredentialManager = new AzureStorageCredentialManager(); - StorageCredentials storageCredentialsToken = azureStorageCredentialManager.getStorageCredentialAccessTokenFromServicePrincipals(configuration.accountName(), configuration.clientId(), configuration.clientSecret(), configuration.tenantId()); - - try { - CloudStorageAccount cloud = new CloudStorageAccount(storageCredentialsToken, true, DEFAULT_ENDPOINT_SUFFIX, configuration.accountName()); - return createAzurePersistence(cloud, configuration, true); - } catch (StorageException | URISyntaxException e) { - throw new IOException(e); - } - } - - @NotNull - private static AzurePersistence createAzurePersistence(String connectionString, Configuration configuration, boolean createContainer) throws IOException { - try { - CloudStorageAccount cloud = CloudStorageAccount.parse(connectionString); - log.info("Connection string: '{}'", cloud); - return createAzurePersistence(cloud, configuration, createContainer); - } catch (StorageException | URISyntaxException | InvalidKeyException e) { - throw new IOException(e); - } - } - - @NotNull - private static AzurePersistence createAzurePersistence(CloudStorageAccount cloud, Configuration configuration, boolean createContainer) throws URISyntaxException, StorageException { - CloudBlobClient cloudBlobClient = cloud.createCloudBlobClient(); - BlobRequestOptions blobRequestOptions = new BlobRequestOptions(); - - if (configuration.enableSecondaryLocation()) { - blobRequestOptions.setLocationMode(LocationMode.PRIMARY_THEN_SECONDARY); - } - cloudBlobClient.setDefaultRequestOptions(blobRequestOptions); - - CloudBlobContainer container = cloudBlobClient.getContainerReference(configuration.containerName()); - if (createContainer && !container.exists()) { - container.create(); - } - String path = normalizePath(configuration.rootPath()); - return new AzurePersistence(container.getDirectoryReference(path)); - } - - @NotNull - private static String normalizePath(@NotNull String rootPath) { - if (rootPath.length() > 0 && rootPath.charAt(0) == '/') { - return rootPath.substring(1); - } - return rootPath; } } \ No newline at end of file diff --git 
a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java index 5c740fac1d2..1aafe96ed5c 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java @@ -16,18 +16,12 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.ResultContinuation; -import com.microsoft.azure.storage.ResultSegment; -import com.microsoft.azure.storage.StorageCredentials; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.StorageUri; -import com.microsoft.azure.storage.blob.BlobListingDetails; -import com.microsoft.azure.storage.blob.CloudBlob; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.LeaseStatus; -import com.microsoft.azure.storage.blob.ListBlobItem; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.ListBlobsOptions; +import com.azure.storage.blob.specialized.AppendBlobClient; +import com.azure.storage.blob.specialized.BlockBlobClient; import org.apache.jackrabbit.oak.commons.Buffer; import org.apache.jackrabbit.oak.segment.spi.RepositoryNotReachableException; import org.jetbrains.annotations.NotNull; @@ -44,6 +38,7 @@ import java.util.ArrayList; import java.util.EnumSet; import java.util.List; +import java.util.stream.Collectors; public final class AzureUtilities { @@ -58,119 +53,33 @@ public final class AzureUtilities { private AzureUtilities() { } - public static String getName(CloudBlob blob) { + public static String getName(BlobItem blob) { return Paths.get(blob.getName()).getFileName().toString(); } - public static String getName(CloudBlobDirectory directory) { - return Paths.get(directory.getUri().getPath()).getFileName().toString(); + public static String getName(AppendBlobClient blob) { + return Paths.get(blob.getBlobName()).getFileName().toString(); } - public static List getBlobs(CloudBlobDirectory directory) throws IOException { - List blobList = new ArrayList<>(); - ResultContinuation token = null; - do { - ResultSegment result = listBlobsInSegments(directory, token); //get the blobs in pages of 5000 - for (ListBlobItem b : result.getResults()) { //add resultant blobs to list - if (b instanceof CloudBlob) { - CloudBlob cloudBlob = (CloudBlob) b; - blobList.add(cloudBlob); - } - } - token = result.getContinuationToken(); - } while (token != null); - return blobList; + + + public static List getBlobs(BlobContainerClient blobContainerClient, ListBlobsOptions listOptions) { + return blobContainerClient.listBlobs(listOptions, null).stream().collect(Collectors.toList()); } - public static void readBufferFully(CloudBlob blob, Buffer buffer) throws IOException { + public static void readBufferFully(BlockBlobClient blob, Buffer buffer) throws IOException { try { - blob.download(new ByteBufferOutputStream(buffer)); + blob.downloadStream(new ByteBufferOutputStream(buffer)); buffer.flip(); - } catch (StorageException e) { - if (e.getHttpStatusCode() == 404) { - log.error("Blob not found in the remote repository: {}", blob.getName()); - throw new 
FileNotFoundException("Blob not found in the remote repository: " + blob.getName()); + } catch (BlobStorageException e) { + if (e.getStatusCode() == 404) { + log.error("Blob not found in the remote repository: {}", blob.getBlobName()); + throw new FileNotFoundException("Blob not found in the remote repository: " + blob.getBlobName()); } throw new RepositoryNotReachableException(e); } } - public static void deleteAllEntries(CloudBlobDirectory directory) throws IOException { - getBlobs(directory).forEach(b -> { - try { - b.deleteIfExists(); - } catch (StorageException e) { - log.error("Can't delete blob {}", b.getUri().getPath(), e); - } - }); - } - - public static CloudBlobDirectory cloudBlobDirectoryFrom(StorageCredentials credentials, - String uri, String dir) throws URISyntaxException, StorageException { - StorageUri storageUri = new StorageUri(new URI(uri)); - CloudBlobContainer container = new CloudBlobContainer(storageUri, credentials); - - container.createIfNotExists(); - - return container.getDirectoryReference(dir); - } - - public static CloudBlobDirectory cloudBlobDirectoryFrom(String connection, String containerName, - String dir) throws InvalidKeyException, URISyntaxException, StorageException { - CloudStorageAccount cloud = CloudStorageAccount.parse(connection); - CloudBlobContainer container = cloud.createCloudBlobClient().getContainerReference(containerName); - container.createIfNotExists(); - - return container.getDirectoryReference(dir); - } - - private static ResultSegment listBlobsInSegments(CloudBlobDirectory directory, - ResultContinuation token) throws IOException { - ResultSegment result = null; - IOException lastException = null; - for (int sleep = 10; sleep <= 10000; sleep *= 10) { //increment the sleep time in steps. - try { - result = directory.listBlobsSegmented( - null, - false, - EnumSet.of(BlobListingDetails.METADATA), - 5000, - token, - null, - null); - break; //we have the results, no need to retry - } catch (StorageException | URISyntaxException e) { - lastException = new IOException(e); - try { - Thread.sleep(sleep); //Sleep and retry - } catch (InterruptedException ex) { - log.warn("Interrupted", e); - } - } - } - - if (result == null) { - throw lastException; - } else { - return result; - } - } - - public static void deleteAllBlobs(@NotNull CloudBlobDirectory directory) throws URISyntaxException, StorageException, InterruptedException { - for (ListBlobItem blobItem : directory.listBlobs()) { - if (blobItem instanceof CloudBlob) { - CloudBlob cloudBlob = (CloudBlob) blobItem; - if (cloudBlob.getProperties().getLeaseStatus() == LeaseStatus.LOCKED) { - cloudBlob.breakLease(0); - } - cloudBlob.deleteIfExists(); - } else if (blobItem instanceof CloudBlobDirectory) { - CloudBlobDirectory cloudBlobDirectory = (CloudBlobDirectory) blobItem; - deleteAllBlobs(cloudBlobDirectory); - } - } - } - private static class ByteBufferOutputStream extends OutputStream { @NotNull diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/Configuration.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/Configuration.java index f0aa1e4b1c2..b1bd54f98a7 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/Configuration.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/Configuration.java @@ -27,7 +27,7 @@ pid = {PID}, name = "Apache Jackrabbit Oak Azure Segment Store Service", description = "Azure backend for the Oak Segment Node Store") -@interface Configuration 
{ +public @interface Configuration { String PID = "org.apache.jackrabbit.oak.segment.azure.AzureSegmentStoreService"; diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/ReverseFileReader.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/ReverseFileReader.java index 9b564b63e9f..89c0f8f27d1 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/ReverseFileReader.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/ReverseFileReader.java @@ -16,15 +16,17 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.OperationContext; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlob; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobRange; +import com.azure.storage.blob.models.BlobRequestConditions; +import com.azure.storage.blob.models.BlobStorageException; +import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.List; import static java.lang.Math.min; @@ -35,7 +37,9 @@ public class ReverseFileReader { private int bufferSize; - private final CloudBlob blob; + private final BlobContainerClient blobContainerClient; + + private final BlobItem blob; private byte[] buffer; @@ -43,14 +47,15 @@ public class ReverseFileReader { private int fileOffset; - public ReverseFileReader(CloudBlob blob) throws StorageException { - this (blob, BUFFER_SIZE); + public ReverseFileReader(BlobContainerClient blobContainerClient, BlobItem blob) throws BlobStorageException { + this(blobContainerClient, blob, BUFFER_SIZE); } - public ReverseFileReader(CloudBlob blob, int bufferSize) throws StorageException { + public ReverseFileReader(BlobContainerClient blobContainerClient, BlobItem blob, int bufferSize) throws BlobStorageException { + this.blobContainerClient = blobContainerClient; this.blob = blob; - if (blob.exists()) { - this.fileOffset = (int) blob.getProperties().getLength(); + if (blobContainerClient.getBlobClient(blob.getName()).exists()) { + this.fileOffset = blob.getProperties().getContentLength().intValue(); } else { this.fileOffset = 0; } @@ -67,12 +72,11 @@ private void readBlock() throws IOException { if (buffer.length > 0) { fileOffset -= buffer.length; try { - OperationContext opContext = new OperationContext(); - HashMap userHeaders = new HashMap<>(); - userHeaders.put("If-Match", "*"); - opContext.setUserHeaders(userHeaders); - blob.downloadRangeToByteArray(fileOffset, Long.valueOf(buffer.length), buffer, 0, null, null, opContext); - } catch (StorageException e) { + BlobRange blobRange = new BlobRange(Long.valueOf(fileOffset), Long.valueOf(buffer.length)); + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(buffer.length); + blobContainerClient.getBlobClient(blob.getName()).downloadStreamWithResponse(outputStream, blobRange, null, new BlobRequestConditions().setIfMatch("*"), false, null, null); + buffer = outputStream.toByteArray(); + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/AzureCheck.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/AzureCheck.java index 9ba52ec569e..5303969d5aa 100644 --- 
a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/AzureCheck.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/AzureCheck.java @@ -18,8 +18,8 @@ import com.google.common.io.Files; import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; import org.apache.jackrabbit.oak.segment.file.JournalReader; import org.apache.jackrabbit.oak.segment.file.ReadOnlyFileStore; @@ -348,7 +348,7 @@ public void afterSegmentRead(File file, long msb, long lsb, int length, long ela private final Integer persistentCacheSizeGb; private final CloudBlobDirectory cloudBlobDirectory; - private final AzureStorageCredentialManager azureStorageCredentialManager; + private final AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8; private AzureCheck(Builder builder) { this.path = builder.path; @@ -367,7 +367,7 @@ private AzureCheck(Builder builder) { this.persistentCachePath = builder.persistentCachePath; this.persistentCacheSizeGb = builder.persistentCacheSizeGb; this.cloudBlobDirectory = builder.cloudBlobDirectory; - this.azureStorageCredentialManager = new AzureStorageCredentialManager(); + this.azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); } private static Integer revisionsToCheckCount(Integer revisionsCount) { @@ -379,9 +379,9 @@ public int run() { SegmentNodeStorePersistence persistence; if (cloudBlobDirectory != null) { - persistence = new AzurePersistence(cloudBlobDirectory); + persistence = new AzurePersistenceV8(cloudBlobDirectory); } else { - persistence = ToolUtils.newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE, path, azureStorageCredentialManager); + persistence = ToolUtils.newSegmentNodeStorePersistence(ToolUtils.SegmentStoreType.AZURE, path, azureStorageCredentialManagerV8); } if (persistentCachePath != null) { @@ -428,7 +428,7 @@ public int run() { e.printStackTrace(err); return 1; } finally { - azureStorageCredentialManager.close(); + azureStorageCredentialManagerV8.close(); } } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/AzureCompact.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/AzureCompact.java index a5e9e1a7dc4..f6ca4f97a7c 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/AzureCompact.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/AzureCompact.java @@ -36,8 +36,8 @@ import com.microsoft.azure.storage.blob.ListBlobItem; import org.apache.jackrabbit.oak.segment.SegmentCache; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils.SegmentStoreType; import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.GCType; import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.CompactorType; @@ -307,7 +307,7 @@ public AzureCompact build() { 
private final CloudBlobDirectory sourceCloudBlobDirectory; private final CloudBlobDirectory destinationCloudBlobDirectory; - private final AzureStorageCredentialManager azureStorageCredentialManager; + private final AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8; private AzureCompact(Builder builder) { this.path = builder.path; @@ -324,7 +324,7 @@ private AzureCompact(Builder builder) { this.garbageThresholdPercentage = builder.garbageThresholdPercentage; this.sourceCloudBlobDirectory = builder.sourceCloudBlobDirectory; this.destinationCloudBlobDirectory = builder.destinationCloudBlobDirectory; - this.azureStorageCredentialManager = new AzureStorageCredentialManager(); + this.azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); } public int run() throws IOException, StorageException, URISyntaxException { @@ -333,11 +333,11 @@ public int run() throws IOException, StorageException, URISyntaxException { SegmentNodeStorePersistence roPersistence; SegmentNodeStorePersistence rwPersistence; if (sourceCloudBlobDirectory != null && destinationCloudBlobDirectory != null) { - roPersistence = new AzurePersistence(sourceCloudBlobDirectory); - rwPersistence = new AzurePersistence(destinationCloudBlobDirectory); + roPersistence = new AzurePersistenceV8(sourceCloudBlobDirectory); + rwPersistence = new AzurePersistenceV8(destinationCloudBlobDirectory); } else { - roPersistence = newSegmentNodeStorePersistence(SegmentStoreType.AZURE, path, azureStorageCredentialManager); - rwPersistence = newSegmentNodeStorePersistence(SegmentStoreType.AZURE, targetPath, azureStorageCredentialManager); + roPersistence = newSegmentNodeStorePersistence(SegmentStoreType.AZURE, path, azureStorageCredentialManagerV8); + rwPersistence = newSegmentNodeStorePersistence(SegmentStoreType.AZURE, targetPath, azureStorageCredentialManagerV8); } if (persistentCachePath != null) { @@ -363,7 +363,7 @@ public int run() throws IOException, StorageException, URISyntaxException { CloudBlobContainer targetContainer = null; if (targetPath != null) { - CloudBlobDirectory targetDirectory = createCloudBlobDirectory(targetPath.substring(3), azureStorageCredentialManager); + CloudBlobDirectory targetDirectory = createCloudBlobDirectory(targetPath.substring(3), azureStorageCredentialManagerV8); targetContainer = targetDirectory.getContainer(); } else { targetContainer = destinationCloudBlobDirectory.getContainer(); @@ -424,7 +424,7 @@ public int run() throws IOException, StorageException, URISyntaxException { persistGCJournal(rwPersistence, newSize, gcGeneration, root); // close azure storage credential manager - azureStorageCredentialManager.close(); + azureStorageCredentialManagerV8.close(); return 0; } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopy.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopy.java index 92fffcb101d..ff816fcccf0 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopy.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentCopy.java @@ -43,7 +43,7 @@ import java.util.concurrent.Future; import org.apache.jackrabbit.oak.commons.Buffer; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; import org.apache.jackrabbit.oak.segment.azure.tool.SegmentStoreMigrator.Segment; import 
org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils.SegmentStoreType; import org.apache.jackrabbit.oak.segment.azure.util.Retrier; @@ -260,7 +260,7 @@ public SegmentCopy build() { private SegmentNodeStorePersistence destPersistence; private ExecutorService executor = Executors.newFixedThreadPool(READ_THREADS + 1); - private final AzureStorageCredentialManager azureStorageCredentialManager; + private final AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8; public SegmentCopy(Builder builder) { this.source = builder.source; @@ -273,7 +273,7 @@ public SegmentCopy(Builder builder) { this.maxSizeGb = builder.maxSizeGb; this.outWriter = builder.outWriter; this.errWriter = builder.errWriter; - this.azureStorageCredentialManager = new AzureStorageCredentialManager(); + this.azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); } public int run() { @@ -287,7 +287,7 @@ public int run() { if (flat && destType == SegmentStoreType.TAR) { try { - srcPersistence = newSegmentNodeStorePersistence(srcType, source, azureStorageCredentialManager); + srcPersistence = newSegmentNodeStorePersistence(srcType, source, azureStorageCredentialManagerV8); SegmentArchiveManager sourceManager = srcPersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); @@ -365,13 +365,13 @@ public int run() { e.printStackTrace(errWriter); return 1; } finally { - azureStorageCredentialManager.close(); + azureStorageCredentialManagerV8.close(); } } else { try { if (srcPersistence == null || destPersistence == null) { - srcPersistence = newSegmentNodeStorePersistence(srcType, source, azureStorageCredentialManager); - destPersistence = newSegmentNodeStorePersistence(destType, destination, azureStorageCredentialManager); + srcPersistence = newSegmentNodeStorePersistence(srcType, source, azureStorageCredentialManagerV8); + destPersistence = newSegmentNodeStorePersistence(destType, destination, azureStorageCredentialManagerV8); } printMessage(outWriter, "Started segment-copy transfer!"); @@ -396,7 +396,7 @@ public int run() { e.printStackTrace(errWriter); return 1; } finally { - azureStorageCredentialManager.close(); + azureStorageCredentialManagerV8.close(); } } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentStoreMigrator.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentStoreMigrator.java index 43677b62efb..6bba400c16c 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentStoreMigrator.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/SegmentStoreMigrator.java @@ -23,7 +23,7 @@ import com.microsoft.azure.storage.blob.CloudBlobDirectory; import org.apache.jackrabbit.oak.commons.Buffer; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils.SegmentStoreType; import org.apache.jackrabbit.oak.segment.azure.util.Retrier; import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence; @@ -290,7 +290,7 @@ public Builder withSource(File dir) { } public Builder withSource(CloudBlobDirectory dir) throws URISyntaxException, StorageException { - this.source = new AzurePersistence(dir); + this.source = new AzurePersistenceV8(dir); this.sourceName = storeDescription(SegmentStoreType.AZURE, dir.getContainer().getName() + "/" + 
dir.getPrefix()); return this; } @@ -314,7 +314,7 @@ public Builder withTarget(File dir) { } public Builder withTarget(CloudBlobDirectory dir) throws URISyntaxException, StorageException { - this.target = new AzurePersistence(dir); + this.target = new AzurePersistenceV8(dir); this.targetName = storeDescription(SegmentStoreType.AZURE, dir.getContainer().getName() + "/" + dir.getPrefix()); return this; } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/ToolUtils.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/ToolUtils.java index 172b7b2dccb..18592f6ad3f 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/ToolUtils.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/tool/ToolUtils.java @@ -38,9 +38,9 @@ import java.util.concurrent.TimeUnit; import org.apache.jackrabbit.oak.commons.Buffer; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; -import org.apache.jackrabbit.oak.segment.azure.AzureUtilities; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.CompactorType; import org.apache.jackrabbit.oak.segment.file.*; @@ -64,7 +64,6 @@ import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.blob.CloudBlobDirectory; import org.apache.jackrabbit.oak.stats.StatisticsProvider; -import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -131,14 +130,14 @@ public static SegmentNodeStorePersistence decorateWithCache(SegmentNodeStorePers public static SegmentNodeStorePersistence newSegmentNodeStorePersistence(SegmentStoreType storeType, String pathOrUri, - @Nullable AzureStorageCredentialManager azureStorageCredentialManager) { + @Nullable AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8) { SegmentNodeStorePersistence persistence = null; switch (storeType) { case AZURE: - Objects.requireNonNull(azureStorageCredentialManager, "azure storage credentials manager instance cannot be null"); - CloudBlobDirectory cloudBlobDirectory = createCloudBlobDirectory(pathOrUri.substring(3), azureStorageCredentialManager); - persistence = new AzurePersistence(cloudBlobDirectory); + Objects.requireNonNull(azureStorageCredentialManagerV8, "azure storage credentials manager instance cannot be null"); + CloudBlobDirectory cloudBlobDirectory = createCloudBlobDirectory(pathOrUri.substring(3), azureStorageCredentialManagerV8); + persistence = new AzurePersistenceV8(cloudBlobDirectory); break; default: persistence = new TarPersistence(new File(pathOrUri)); @@ -160,13 +159,13 @@ public static SegmentArchiveManager createArchiveManager(SegmentNodeStorePersist return archiveManager; } - public static CloudBlobDirectory createCloudBlobDirectory(String path, AzureStorageCredentialManager azureStorageCredentialManager) { - return createCloudBlobDirectory(path, ENVIRONMENT, azureStorageCredentialManager); + public static CloudBlobDirectory createCloudBlobDirectory(String path, AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8) { + return 
createCloudBlobDirectory(path, ENVIRONMENT, azureStorageCredentialManagerV8); } public static CloudBlobDirectory createCloudBlobDirectory(String path, Environment environment, - AzureStorageCredentialManager azureStorageCredentialManager) { + AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8) { Map config = parseAzureConfigurationFromUri(path); String accountName = config.get(KEY_ACCOUNT_NAME); @@ -175,14 +174,14 @@ public static CloudBlobDirectory createCloudBlobDirectory(String path, if (config.containsKey(KEY_SHARED_ACCESS_SIGNATURE)) { credentials = new StorageCredentialsSharedAccessSignature(config.get(KEY_SHARED_ACCESS_SIGNATURE)); } else { - credentials = azureStorageCredentialManager.getStorageCredentialsFromEnvironment(accountName, environment); + credentials = azureStorageCredentialManagerV8.getStorageCredentialsFromEnvironment(accountName, environment); } String uri = config.get(KEY_STORAGE_URI); String dir = config.get(KEY_DIR); try { - return AzureUtilities.cloudBlobDirectoryFrom(credentials, uri, dir); + return AzureUtilitiesV8.cloudBlobDirectoryFrom(credentials, uri, dir); } catch (URISyntaxException | StorageException e) { throw new IllegalArgumentException( "Could not connect to the Azure Storage. Please verify the path provided!"); @@ -190,8 +189,8 @@ public static CloudBlobDirectory createCloudBlobDirectory(String path, } public static List readRevisions(String uri) { - try (AzureStorageCredentialManager azureStorageCredentialManager = new AzureStorageCredentialManager()) { - SegmentNodeStorePersistence persistence = newSegmentNodeStorePersistence(SegmentStoreType.AZURE, uri, azureStorageCredentialManager); + try (AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8()) { + SegmentNodeStorePersistence persistence = newSegmentNodeStorePersistence(SegmentStoreType.AZURE, uri, azureStorageCredentialManagerV8); JournalFile journal = persistence.getJournalFile(); if (journal.exists()) { try (JournalReader journalReader = new JournalReader(journal)) { diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java index 24452d15ea9..d5ed8394183 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java @@ -19,10 +19,8 @@ package org.apache.jackrabbit.oak.segment.azure.util; -import com.microsoft.azure.storage.RetryLinearRetry; -import com.microsoft.azure.storage.blob.BlobRequestOptions; - -import java.util.concurrent.TimeUnit; +import com.azure.storage.common.policy.RequestRetryOptions; +import com.azure.storage.common.policy.RetryPolicyType; public class AzureRequestOptions { @@ -45,52 +43,43 @@ public class AzureRequestOptions { private AzureRequestOptions() { } - /** - * Apply default request options to the blobRequestOptions if they are not already set. 
- * @param blobRequestOptions - */ - public static void applyDefaultRequestOptions(BlobRequestOptions blobRequestOptions) { - if (blobRequestOptions.getRetryPolicyFactory() == null) { - int retryAttempts = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS); - if (retryAttempts > 0) { - Integer retryBackoffSeconds = Integer.getInteger(RETRY_BACKOFF_PROP, DEFAULT_RETRY_BACKOFF_SECONDS); - blobRequestOptions.setRetryPolicyFactory(new RetryLinearRetry((int) TimeUnit.SECONDS.toMillis(retryBackoffSeconds), retryAttempts)); - } - } - if (blobRequestOptions.getMaximumExecutionTimeInMs() == null) { - int timeoutExecution = Integer.getInteger(TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION); - if (timeoutExecution > 0) { - blobRequestOptions.setMaximumExecutionTimeInMs((int) TimeUnit.SECONDS.toMillis(timeoutExecution)); - } - } - if (blobRequestOptions.getTimeoutIntervalInMs() == null) { - int timeoutInterval = Integer.getInteger(TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL); - if (timeoutInterval > 0) { - blobRequestOptions.setTimeoutIntervalInMs((int) TimeUnit.SECONDS.toMillis(timeoutInterval)); - } - } + + public static RequestRetryOptions getRetryOptionsDefault() { + return getRetryOptionsDefault(null); + } + + public static RequestRetryOptions getRetryOptionsDefault(String secondaryHost) { + int retryAttempts = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS); + int timeoutExecution = Integer.getInteger(TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION); + int timeoutInterval = Integer.getInteger(TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL); + long timeoutIntervalToMs = timeoutInterval * 1_000L; + long timeoutIntervalMax = timeoutIntervalToMs * 5; + + return new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, + retryAttempts, + timeoutExecution, + timeoutIntervalToMs, + timeoutIntervalMax, + secondaryHost); } /** - * Optimise the blob request options for write operations. This method does not change the original blobRequestOptions. 
-     * This method also applies the default request options if they are not already set, by calling {@link #applyDefaultRequestOptions(BlobRequestOptions)}
-     * @param blobRequestOptions
-     * @return write optimised blobRequestOptions
+     * Optimise the retry options for write operations. The secondary host is always {@code null} here,
+     * because write requests are never served by the secondary location.
+     * @return the write-optimised {@link RequestRetryOptions}
      */
-    public static BlobRequestOptions optimiseForWriteOperations(BlobRequestOptions blobRequestOptions) {
-        BlobRequestOptions writeOptimisedBlobRequestOptions = new BlobRequestOptions(blobRequestOptions);
-        applyDefaultRequestOptions(writeOptimisedBlobRequestOptions);
-
-        Integer writeTimeoutExecution = Integer.getInteger(WRITE_TIMEOUT_EXECUTION_PROP);
-        if (writeTimeoutExecution != null) {
-            writeOptimisedBlobRequestOptions.setMaximumExecutionTimeInMs((int) TimeUnit.SECONDS.toMillis(writeTimeoutExecution));
-        }
-
-        Integer writeTimeoutInterval = Integer.getInteger(WRITE_TIMEOUT_INTERVAL_PROP);
-        if (writeTimeoutInterval != null) {
-            writeOptimisedBlobRequestOptions.setTimeoutIntervalInMs((int) TimeUnit.SECONDS.toMillis(writeTimeoutInterval));
-        }
-
-        return writeOptimisedBlobRequestOptions;
+    public static RequestRetryOptions getRetryOperationsOptimiseForWriteOperations() {
+        int retryAttempts = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS);
+        Integer writeTimeoutExecution = Integer.getInteger(WRITE_TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION);
+        Integer writeTimeoutInterval = Integer.getInteger(WRITE_TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL);
+        long writeTimeoutIntervalToMs = writeTimeoutInterval * 1_000L;
+        long writeTimeoutIntervalMax = writeTimeoutIntervalToMs * 5;
+
+        return new RequestRetryOptions(RetryPolicyType.EXPONENTIAL,
+                retryAttempts,
+                writeTimeoutExecution,
+                writeTimeoutIntervalToMs,
+                writeTimeoutIntervalMax,
+                null);
     }
-}
+
+}
\ No newline at end of file
diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8.java
new file mode 100644
index 00000000000..59700d656f9
--- /dev/null
+++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License. 
+ */ + +package org.apache.jackrabbit.oak.segment.azure.util; + +import com.microsoft.azure.storage.RetryLinearRetry; +import com.microsoft.azure.storage.blob.BlobRequestOptions; + +import java.util.concurrent.TimeUnit; + +public class AzureRequestOptionsV8 { + + static final String RETRY_ATTEMPTS_PROP = "segment.azure.retry.attempts"; + static final int DEFAULT_RETRY_ATTEMPTS = 5; + + static final String RETRY_BACKOFF_PROP = "segment.azure.retry.backoff"; + static final int DEFAULT_RETRY_BACKOFF_SECONDS = 5; + + static final String TIMEOUT_EXECUTION_PROP = "segment.timeout.execution"; + static final int DEFAULT_TIMEOUT_EXECUTION = 30; + + static final String TIMEOUT_INTERVAL_PROP = "segment.timeout.interval"; + static final int DEFAULT_TIMEOUT_INTERVAL = 1; + + static final String WRITE_TIMEOUT_EXECUTION_PROP = "segment.write.timeout.execution"; + + static final String WRITE_TIMEOUT_INTERVAL_PROP = "segment.write.timeout.interval"; + + private AzureRequestOptionsV8() { + } + + /** + * Apply default request options to the blobRequestOptions if they are not already set. + * @param blobRequestOptions + */ + public static void applyDefaultRequestOptions(BlobRequestOptions blobRequestOptions) { + if (blobRequestOptions.getRetryPolicyFactory() == null) { + int retryAttempts = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS); + if (retryAttempts > 0) { + Integer retryBackoffSeconds = Integer.getInteger(RETRY_BACKOFF_PROP, DEFAULT_RETRY_BACKOFF_SECONDS); + blobRequestOptions.setRetryPolicyFactory(new RetryLinearRetry((int) TimeUnit.SECONDS.toMillis(retryBackoffSeconds), retryAttempts)); + } + } + if (blobRequestOptions.getMaximumExecutionTimeInMs() == null) { + int timeoutExecution = Integer.getInteger(TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION); + if (timeoutExecution > 0) { + blobRequestOptions.setMaximumExecutionTimeInMs((int) TimeUnit.SECONDS.toMillis(timeoutExecution)); + } + } + if (blobRequestOptions.getTimeoutIntervalInMs() == null) { + int timeoutInterval = Integer.getInteger(TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL); + if (timeoutInterval > 0) { + blobRequestOptions.setTimeoutIntervalInMs((int) TimeUnit.SECONDS.toMillis(timeoutInterval)); + } + } + } + + /** + * Optimise the blob request options for write operations. This method does not change the original blobRequestOptions. 
+ * This method also applies the default request options if they are not already set, by calling {@link #applyDefaultRequestOptions(BlobRequestOptions)} + * @param blobRequestOptions + * @return write optimised blobRequestOptions + */ + public static BlobRequestOptions optimiseForWriteOperations(BlobRequestOptions blobRequestOptions) { + BlobRequestOptions writeOptimisedBlobRequestOptions = new BlobRequestOptions(blobRequestOptions); + applyDefaultRequestOptions(writeOptimisedBlobRequestOptions); + + Integer writeTimeoutExecution = Integer.getInteger(WRITE_TIMEOUT_EXECUTION_PROP); + if (writeTimeoutExecution != null) { + writeOptimisedBlobRequestOptions.setMaximumExecutionTimeInMs((int) TimeUnit.SECONDS.toMillis(writeTimeoutExecution)); + } + + Integer writeTimeoutInterval = Integer.getInteger(WRITE_TIMEOUT_INTERVAL_PROP); + if (writeTimeoutInterval != null) { + writeOptimisedBlobRequestOptions.setTimeoutIntervalInMs((int) TimeUnit.SECONDS.toMillis(writeTimeoutInterval)); + } + + return writeOptimisedBlobRequestOptions; + } +} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java new file mode 100644 index 00000000000..a6e8d6821ab --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java @@ -0,0 +1,330 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.jackrabbit.oak.segment.azure.v8;
+
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.BlobListingDetails;
+import com.microsoft.azure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.blob.CloudBlobDirectory;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+import com.microsoft.azure.storage.blob.CopyStatus;
+import org.apache.jackrabbit.oak.segment.remote.WriteAccessController;
+import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager;
+import org.apache.jackrabbit.oak.segment.remote.RemoteUtilities;
+import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor;
+import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor;
+import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveReader;
+import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter;
+import org.jetbrains.annotations.NotNull;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+import static org.apache.jackrabbit.guava.common.base.Preconditions.checkArgument;
+import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.getName;
+
+public class AzureArchiveManagerV8 implements SegmentArchiveManager {
+
+    private static final Logger log = LoggerFactory.getLogger(AzureArchiveManagerV8.class);
+
+    protected final CloudBlobDirectory cloudBlobDirectory;
+
+    protected final IOMonitor ioMonitor;
+
+    protected final FileStoreMonitor monitor;
+    private WriteAccessController writeAccessController;
+
+    public AzureArchiveManagerV8(CloudBlobDirectory segmentstoreDirectory, IOMonitor ioMonitor, FileStoreMonitor fileStoreMonitor, WriteAccessController writeAccessController) {
+        this.cloudBlobDirectory = segmentstoreDirectory;
+        this.ioMonitor = ioMonitor;
+        this.monitor = fileStoreMonitor;
+        this.writeAccessController = writeAccessController;
+    }
+
+    @Override
+    public List<String> listArchives() throws IOException {
+        try {
+            List<String> archiveNames = StreamSupport.stream(cloudBlobDirectory
+                    .listBlobs(null, false, EnumSet.noneOf(BlobListingDetails.class), null, null)
+                    .spliterator(), false)
+                    .filter(i -> i instanceof CloudBlobDirectory)
+                    .map(i -> (CloudBlobDirectory) i)
+                    .filter(i -> getName(i).endsWith(".tar"))
+                    .map(CloudBlobDirectory::getPrefix)
+                    .map(Paths::get)
+                    .map(Path::getFileName)
+                    .map(Path::toString)
+                    .collect(Collectors.toList());
+
+            Iterator<String> it = archiveNames.iterator();
+            while (it.hasNext()) {
+                String archiveName = it.next();
+                if (isArchiveEmpty(archiveName)) {
+                    delete(archiveName);
+                    it.remove();
+                }
+            }
+            return archiveNames;
+        } catch (URISyntaxException | StorageException e) {
+            throw new IOException(e);
+        }
+    }
+
+    /**
+     * Check if the archive contains a valid first ("0000.") segment.
+     * @param archiveName
+     * @return true if the archive is empty (contains no 0000.* segment)
+     */
+    private boolean isArchiveEmpty(String archiveName) throws IOException, URISyntaxException, StorageException {
+        return !getDirectory(archiveName).listBlobs("0000.").iterator().hasNext();
+    }
+
+    @Override
+    public SegmentArchiveReader open(String archiveName) throws IOException {
+        try {
+            CloudBlobDirectory archiveDirectory = getDirectory(archiveName);
+            if (!archiveDirectory.getBlockBlobReference("closed").exists()) {
+                return null;
+            }
+            return new AzureSegmentArchiveReaderV8(archiveDirectory, ioMonitor);
+        } catch (StorageException | URISyntaxException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public SegmentArchiveReader forceOpen(String archiveName) throws IOException {
+        CloudBlobDirectory archiveDirectory = getDirectory(archiveName);
+        return new AzureSegmentArchiveReaderV8(archiveDirectory, ioMonitor);
+    }
+
+    @Override
+    public SegmentArchiveWriter create(String archiveName) throws IOException {
+        return new AzureSegmentArchiveWriterV8(getDirectory(archiveName), ioMonitor, monitor, writeAccessController);
+    }
+
+    @Override
+    public boolean delete(String archiveName) {
+        try {
+            getBlobs(archiveName)
+                    .forEach(cloudBlob -> {
+                        try {
+                            writeAccessController.checkWritingAllowed();
+                            cloudBlob.delete();
+                        } catch (StorageException e) {
+                            log.error("Can't delete segment {}", cloudBlob.getUri().getPath(), e);
+                        }
+                    });
+            return true;
+        } catch (IOException e) {
+            log.error("Can't delete archive {}", archiveName, e);
+            return false;
+        }
+    }
+
+    @Override
+    public boolean renameTo(String from, String to) {
+        try {
+            CloudBlobDirectory targetDirectory = getDirectory(to);
+            getBlobs(from)
+                    .forEach(cloudBlob -> {
+                        try {
+                            writeAccessController.checkWritingAllowed();
+                            renameBlob(cloudBlob, targetDirectory);
+                        } catch (IOException e) {
+                            log.error("Can't rename segment {}", cloudBlob.getUri().getPath(), e);
+                        }
+                    });
+            return true;
+        } catch (IOException e) {
+            log.error("Can't rename archive {} to {}", from, to, e);
+            return false;
+        }
+    }
+
+    @Override
+    public void copyFile(String from, String to) throws IOException {
+        CloudBlobDirectory targetDirectory = getDirectory(to);
+        getBlobs(from)
+                .forEach(cloudBlob -> {
+                    try {
+                        copyBlob(cloudBlob, targetDirectory);
+                    } catch (IOException e) {
+                        log.error("Can't copy segment {}", cloudBlob.getUri().getPath(), e);
+                    }
+                });
+    }
+
+    @Override
+    public boolean exists(String archiveName) {
+        try {
+            return getDirectory(archiveName).listBlobsSegmented(null, false, null, 1, null, null, null).getLength() > 0;
+        } catch (IOException | StorageException | URISyntaxException e) {
+            log.error("Can't check the existence of {}", archiveName, e);
+            return false;
+        }
+    }
+
+    @Override
+    public void recoverEntries(String archiveName, LinkedHashMap<UUID, byte[]> entries) throws IOException {
+        Pattern pattern = Pattern.compile(RemoteUtilities.SEGMENT_FILE_NAME_PATTERN);
+        List<RecoveredEntry> entryList = new ArrayList<>();
+
+        for (CloudBlob b : getBlobs(archiveName)) {
+            String name = getName(b);
+            Matcher m = pattern.matcher(name);
+            if (!m.matches()) {
+                continue;
+            }
+            int position = Integer.parseInt(m.group(1), 16);
+            UUID uuid = UUID.fromString(m.group(2));
+            long length = b.getProperties().getLength();
+            if (length > 0) {
+                byte[] data = new byte[(int) length];
+                try {
+                    b.downloadToByteArray(data, 0);
+                } catch (StorageException e) {
+                    throw new IOException(e);
+                }
+                entryList.add(new RecoveredEntry(position, uuid, data, name));
+            }
+        }
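+        // the recovered entries are sorted by their position in the archive, so that
+        // the contiguity check below can stop at the first missing segment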
+        Collections.sort(entryList);
+
+        int i = 0;
+        for (RecoveredEntry e : entryList) {
+            if (e.position != i) {
+                log.warn("Missing entry {}.??? when recovering {}. No more segments will be read.", String.format("%04X", i), archiveName);
+                break;
+            }
+            log.info("Recovering segment {}/{}", archiveName, e.fileName);
+            entries.put(e.uuid, e.data);
+            i++;
+        }
+    }
+
+    private void delete(String archiveName, Set<UUID> recoveredEntries) throws IOException {
+        getBlobs(archiveName)
+                .forEach(cloudBlob -> {
+                    if (!recoveredEntries.contains(RemoteUtilities.getSegmentUUID(getName(cloudBlob)))) {
+                        try {
+                            cloudBlob.delete();
+                        } catch (StorageException e) {
+                            log.error("Can't delete segment {}", cloudBlob.getUri().getPath(), e);
+                        }
+                    }
+                });
+    }
+
+    /**
+     * This method does not delete segments from the directory given by {@code archiveName} if they are in the set of recovered segments.
+     * While this method is running, the remote repository may be accessed by another application, and deleting a valid segment there
+     * could cause consistency issues.
+     */
+    @Override
+    public void backup(@NotNull String archiveName, @NotNull String backupArchiveName, @NotNull Set<UUID> recoveredEntries) throws IOException {
+        copyFile(archiveName, backupArchiveName);
+        delete(archiveName, recoveredEntries);
+    }
+
+    protected CloudBlobDirectory getDirectory(String archiveName) throws IOException {
+        try {
+            return cloudBlobDirectory.getDirectoryReference(archiveName);
+        } catch (URISyntaxException e) {
+            throw new IOException(e);
+        }
+    }
+
+    private List<CloudBlob> getBlobs(String archiveName) throws IOException {
+        return AzureUtilitiesV8.getBlobs(getDirectory(archiveName));
+    }
+
+    private void renameBlob(CloudBlob blob, CloudBlobDirectory newParent) throws IOException {
+        copyBlob(blob, newParent);
+        try {
+            blob.delete();
+        } catch (StorageException e) {
+            throw new IOException(e);
+        }
+    }
+
+    private void copyBlob(CloudBlob blob, CloudBlobDirectory newParent) throws IOException {
+        checkArgument(blob instanceof CloudBlockBlob, "Only block blobs are supported for the rename");
+        try {
+            String blobName = getName(blob);
+            CloudBlockBlob newBlob = newParent.getBlockBlobReference(blobName);
+            newBlob.startCopy(blob.getUri());
+
+            boolean isStatusPending = true;
+            while (isStatusPending) {
+                newBlob.downloadAttributes();
+                if (newBlob.getCopyState().getStatus() == CopyStatus.PENDING) {
+                    Thread.sleep(100);
+                } else {
+                    isStatusPending = false;
+                }
+            }
+
+            CopyStatus finalStatus = newBlob.getCopyState().getStatus();
+            if (finalStatus != CopyStatus.SUCCESS) {
+                throw new IOException("Invalid copy status for " + blob.getUri().getPath() + ": " + finalStatus);
+            }
+        } catch (StorageException | InterruptedException | URISyntaxException e) {
+            throw new IOException(e);
+        }
+    }
+
+    private static class RecoveredEntry implements Comparable<RecoveredEntry> {
+
+        private final byte[] data;
+
+        private final UUID uuid;
+
+        private final int position;
+
+        private final String fileName;
+
+        public RecoveredEntry(int position, UUID uuid, byte[] data, String fileName) {
+            this.data = data;
+            this.uuid = uuid;
+            this.position = position;
+            this.fileName = fileName;
+        }
+
+        @Override
+        public int compareTo(RecoveredEntry o) {
+            return Integer.compare(this.position, o.position);
+        }
+    }
+
+}
diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalFileV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalFileV8.java
new file mode 100644
index 
00000000000..02d9c9f63ea --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalFileV8.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudAppendBlob; +import org.apache.commons.io.IOUtils; +import org.apache.jackrabbit.oak.segment.spi.persistence.GCJournalFile; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.List; + +public class AzureGCJournalFileV8 implements GCJournalFile { + + private final CloudAppendBlob gcJournal; + + public AzureGCJournalFileV8(CloudAppendBlob gcJournal) { + this.gcJournal = gcJournal; + } + + @Override + public void writeLine(String line) throws IOException { + try { + if (!gcJournal.exists()) { + gcJournal.createOrReplace(); + } + gcJournal.appendText(line + "\n", StandardCharsets.UTF_8.name(), null, null, null); + } catch (StorageException e) { + throw new IOException(e); + } + } + + @Override + public List readLines() throws IOException { + try { + if (!gcJournal.exists()) { + return Collections.emptyList(); + } + byte[] data = new byte[(int) gcJournal.getProperties().getLength()]; + gcJournal.downloadToByteArray(data, 0); + return IOUtils.readLines(new ByteArrayInputStream(data), Charset.defaultCharset()); + } catch (StorageException e) { + throw new IOException(e); + } + } + + @Override + public void truncate() throws IOException { + try { + if (gcJournal.exists()) { + gcJournal.delete(); + } + } catch (StorageException e) { + throw new IOException(e); + } + } +} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8.java new file mode 100644 index 00000000000..00e92f520a7 --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8.java @@ -0,0 +1,328 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import org.apache.jackrabbit.guava.common.collect.ImmutableList; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudAppendBlob; +import com.microsoft.azure.storage.blob.CloudBlob; +import com.microsoft.azure.storage.blob.CloudBlobDirectory; +import com.microsoft.azure.storage.blob.ListBlobItem; +import com.microsoft.azure.storage.blob.DeleteSnapshotsOption; +import com.microsoft.azure.storage.blob.BlobRequestOptions; +import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptionsV8; +import org.apache.jackrabbit.oak.segment.azure.util.CaseInsensitiveKeysMapAccess; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFile; +import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileReader; +import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileWriter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static org.apache.jackrabbit.guava.common.collect.Lists.partition; + +public class AzureJournalFileV8 implements JournalFile { + + private static final Logger log = LoggerFactory.getLogger(AzureJournalFileV8.class); + + private static final int JOURNAL_LINE_LIMIT = Integer.getInteger("org.apache.jackrabbit.oak.segment.azure.journal.lines", 40_000); + + private final CloudBlobDirectory directory; + + private final String journalNamePrefix; + + private final int lineLimit; + + private final WriteAccessController writeAccessController; + + AzureJournalFileV8(CloudBlobDirectory directory, String journalNamePrefix, WriteAccessController writeAccessController, int lineLimit) { + this.directory = directory; + this.journalNamePrefix = journalNamePrefix; + this.lineLimit = lineLimit; + this.writeAccessController = writeAccessController; + } + + public AzureJournalFileV8(CloudBlobDirectory directory, String journalNamePrefix, WriteAccessController writeAccessController) { + this(directory, journalNamePrefix, writeAccessController, JOURNAL_LINE_LIMIT); + } + + @Override + public JournalFileReader openJournalReader() throws IOException { + return new CombinedReader(getJournalBlobs()); + } + + @Override + public JournalFileWriter openJournalWriter() throws IOException { + return new AzureJournalWriter(); + } + + @Override + public String getName() { + return journalNamePrefix; + } + + @Override + public boolean exists() { + try { + return !getJournalBlobs().isEmpty(); + } catch (IOException e) { + log.error("Can't check if the file exists", e); + return false; + } + } + + private String getJournalFileName(int index) { + return String.format("%s.%03d", journalNamePrefix, index); + } + + private List getJournalBlobs() throws IOException { + try { + List result = new ArrayList<>(); + for 
(ListBlobItem b : directory.listBlobs(journalNamePrefix)) {
+                if (b instanceof CloudAppendBlob) {
+                    result.add((CloudAppendBlob) b);
+                } else {
+                    log.warn("Invalid blob type: {} {}", b.getUri(), b.getClass());
+                }
+            }
+            result.sort(Comparator.comparing(AzureUtilitiesV8::getName).reversed());
+            return result;
+        } catch (URISyntaxException | StorageException e) {
+            throw new IOException(e);
+        }
+    }
+
+    private static class AzureJournalReader implements JournalFileReader {
+
+        private final CloudBlob blob;
+
+        private ReverseFileReaderV8 reader;
+
+        private boolean metadataFetched;
+
+        private boolean firstLineReturned;
+
+        private AzureJournalReader(CloudBlob blob) {
+            this.blob = blob;
+        }
+
+        @Override
+        public String readLine() throws IOException {
+            if (reader == null) {
+                try {
+                    if (!metadataFetched) {
+                        blob.downloadAttributes();
+                        metadataFetched = true;
+                        Map<String, String> metadata = CaseInsensitiveKeysMapAccess.convert(blob.getMetadata());
+                        if (metadata.containsKey("lastEntry")) {
+                            firstLineReturned = true;
+                            return metadata.get("lastEntry");
+                        }
+                    }
+                    reader = new ReverseFileReaderV8(blob);
+                    if (firstLineReturned) {
+                        while ("".equals(reader.readLine())); // the first line was already returned, let's fast-forward it
+                    }
+                } catch (StorageException e) {
+                    throw new IOException(e);
+                }
+            }
+            return reader.readLine();
+        }
+
+        @Override
+        public void close() throws IOException {
+        }
+    }
+
+    private class AzureJournalWriter implements JournalFileWriter {
+
+        private CloudAppendBlob currentBlob;
+
+        private int lineCount;
+
+        private final BlobRequestOptions writeOptimisedBlobRequestOptions;
+
+        public AzureJournalWriter() throws IOException {
+            writeOptimisedBlobRequestOptions = AzureRequestOptionsV8.optimiseForWriteOperations(directory.getServiceClient().getDefaultRequestOptions());
+
+            List<CloudAppendBlob> blobs = getJournalBlobs();
+            if (blobs.isEmpty()) {
+                try {
+                    currentBlob = directory.getAppendBlobReference(getJournalFileName(1));
+                    currentBlob.createOrReplace();
+                    currentBlob.downloadAttributes();
+                } catch (URISyntaxException | StorageException e) {
+                    throw new IOException(e);
+                }
+            } else {
+                currentBlob = blobs.get(0);
+            }
+            try {
+                currentBlob.downloadAttributes();
+            } catch (StorageException e) {
+                throw new IOException(e);
+            }
+            String lc = currentBlob.getMetadata().get("lineCount");
+            lineCount = lc == null ? 0 : Integer.parseInt(lc);
+        }
+
+        @Override
+        public void truncate() throws IOException {
+            try {
+                writeAccessController.checkWritingAllowed();
+
+                for (CloudAppendBlob cloudAppendBlob : getJournalBlobs()) {
+                    cloudAppendBlob.delete(DeleteSnapshotsOption.NONE, null, writeOptimisedBlobRequestOptions, null);
+                }
+
+                createNextFile(0);
+            } catch (StorageException e) {
+                throw new IOException(e);
+            }
+        }
+
+        @Override
+        public void writeLine(String line) throws IOException {
+            batchWriteLines(ImmutableList.of(line));
+        }
+
+        @Override
+        public void batchWriteLines(List<String> lines) throws IOException {
+            writeAccessController.checkWritingAllowed();
+
+            if (lines.isEmpty()) {
+                return;
+            }
+            int firstBlockSize = Math.min(lineLimit - lineCount, lines.size());
+            List<String> firstBlock = lines.subList(0, firstBlockSize);
+            List<List<String>> remainingBlocks = partition(lines.subList(firstBlockSize, lines.size()), lineLimit);
+            List<List<String>> allBlocks = ImmutableList.<List<String>>builder()
+                    .addAll(firstBlock.isEmpty() ? ImmutableList.of() : ImmutableList.of(firstBlock))
+                    .addAll(remainingBlocks)
+                    .build();
+
+            for (List<String> entries : allBlocks) {
+                if (lineCount >= lineLimit) {
+                    int parsedSuffix = parseCurrentSuffix();
+                    createNextFile(parsedSuffix);
+                }
+                StringBuilder text = new StringBuilder();
+                for (String line : entries) {
+                    text.append(line).append("\n");
+                }
+                try {
+                    currentBlob.appendText(text.toString(), null, null, writeOptimisedBlobRequestOptions, null);
+                    currentBlob.getMetadata().put("lastEntry", entries.get(entries.size() - 1));
+                    lineCount += entries.size();
+                    currentBlob.getMetadata().put("lineCount", Integer.toString(lineCount));
+                    currentBlob.uploadMetadata(null, writeOptimisedBlobRequestOptions, null);
+                } catch (StorageException e) {
+                    throw new IOException(e);
+                }
+            }
+        }
+
+        private void createNextFile(int suffix) throws IOException {
+            try {
+                currentBlob = directory.getAppendBlobReference(getJournalFileName(suffix + 1));
+                currentBlob.createOrReplace(null, writeOptimisedBlobRequestOptions, null);
+                lineCount = 0;
+            } catch (URISyntaxException | StorageException e) {
+                throw new IOException(e);
+            }
+        }
+
+        private int parseCurrentSuffix() {
+            String name = AzureUtilitiesV8.getName(currentBlob);
+            Pattern pattern = Pattern.compile(Pattern.quote(journalNamePrefix) + "\\.(\\d+)");
+            Matcher matcher = pattern.matcher(name);
+            int parsedSuffix;
+            if (matcher.find()) {
+                String suffix = matcher.group(1);
+                try {
+                    parsedSuffix = Integer.parseInt(suffix);
+                } catch (NumberFormatException e) {
+                    log.warn("Can't parse suffix for journal file {}", name);
+                    parsedSuffix = 0;
+                }
+            } else {
+                log.warn("Can't parse journal file name {}", name);
+                parsedSuffix = 0;
+            }
+            return parsedSuffix;
+        }
+
+        @Override
+        public void close() throws IOException {
+            // do nothing
+        }
+    }
+
+    private static class CombinedReader implements JournalFileReader {
+
+        private final Iterator<AzureJournalReader> readers;
+
+        private JournalFileReader currentReader;
+
+        private CombinedReader(List<CloudAppendBlob> blobs) {
+            readers = blobs.stream().map(AzureJournalReader::new).iterator();
+        }
+
+        @Override
+        public String readLine() throws IOException {
+            String line;
+            do {
+                if (currentReader == null) {
+                    if (!readers.hasNext()) {
+                        return null;
+                    }
+                    currentReader = readers.next();
+                }
+                do {
+                    line = currentReader.readLine();
+                } while ("".equals(line));
+                if (line == null) {
+                    currentReader.close();
+                    currentReader = null;
+                }
+            } while (line == null);
+            return line;
+        }
+
+        @Override
+        public void close() throws IOException {
+            while (readers.hasNext()) {
+                readers.next().close();
+            }
+            if (currentReader != null) {
+                currentReader.close();
+                currentReader = null;
+            }
+        }
+    }
+}
diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8.java
new file mode 100644
index 00000000000..28568c780f6
--- /dev/null
+++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlockBlob; +import org.apache.jackrabbit.oak.segment.spi.persistence.ManifestFile; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.Properties; + +public class AzureManifestFileV8 implements ManifestFile { + + private static final Logger log = LoggerFactory.getLogger(AzureManifestFileV8.class); + + private final CloudBlockBlob manifestBlob; + + public AzureManifestFileV8(CloudBlockBlob manifestBlob) { + this.manifestBlob = manifestBlob; + } + + @Override + public boolean exists() { + try { + return manifestBlob.exists(); + } catch (StorageException e) { + log.error("Can't check if the manifest exists", e); + return false; + } + } + + @Override + public Properties load() throws IOException { + Properties properties = new Properties(); + if (exists()) { + long length = manifestBlob.getProperties().getLength(); + byte[] data = new byte[(int) length]; + try { + manifestBlob.downloadToByteArray(data, 0); + } catch (StorageException e) { + throw new IOException(e); + } + properties.load(new ByteArrayInputStream(data)); + } + return properties; + } + + @Override + public void save(Properties properties) throws IOException { + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + properties.store(bos, null); + + byte[] data = bos.toByteArray(); + try { + manifestBlob.uploadFromByteArray(data, 0, data.length); + } catch (StorageException e) { + throw new IOException(e); + } + } +} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzurePersistenceV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzurePersistenceV8.java new file mode 100644 index 00000000000..f8d5de9ed3b --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzurePersistenceV8.java @@ -0,0 +1,158 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.nio.file.Paths; +import java.util.Date; +import java.util.EnumSet; +import java.util.concurrent.TimeUnit; + +import com.microsoft.azure.storage.OperationContext; +import com.microsoft.azure.storage.RequestCompletedEvent; +import com.microsoft.azure.storage.StorageEvent; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.BlobListingDetails; +import com.microsoft.azure.storage.blob.CloudAppendBlob; +import com.microsoft.azure.storage.blob.CloudBlobDirectory; +import com.microsoft.azure.storage.blob.CloudBlockBlob; +import com.microsoft.azure.storage.blob.ListBlobItem; +import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptionsV8; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor; +import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; +import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitor; +import org.apache.jackrabbit.oak.segment.spi.persistence.GCJournalFile; +import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFile; +import org.apache.jackrabbit.oak.segment.spi.persistence.ManifestFile; +import org.apache.jackrabbit.oak.segment.spi.persistence.RepositoryLock; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class AzurePersistenceV8 implements SegmentNodeStorePersistence { + private static final Logger log = LoggerFactory.getLogger(AzurePersistenceV8.class); + + protected final CloudBlobDirectory segmentstoreDirectory; + + protected WriteAccessController writeAccessController = new WriteAccessController(); + + public AzurePersistenceV8(CloudBlobDirectory segmentStoreDirectory) { + this.segmentstoreDirectory = segmentStoreDirectory; + + AzureRequestOptionsV8.applyDefaultRequestOptions(segmentStoreDirectory.getServiceClient().getDefaultRequestOptions()); + } + + @Override + public SegmentArchiveManager createArchiveManager(boolean mmap, boolean offHeapAccess, IOMonitor ioMonitor, FileStoreMonitor fileStoreMonitor, RemoteStoreMonitor remoteStoreMonitor) { + attachRemoteStoreMonitor(remoteStoreMonitor); + return new AzureArchiveManagerV8(segmentstoreDirectory, ioMonitor, fileStoreMonitor, writeAccessController); + } + + @Override + public boolean segmentFilesExist() { + try { + for (ListBlobItem i : segmentstoreDirectory.listBlobs(null, false, EnumSet.noneOf(BlobListingDetails.class), null, null)) { + if (i instanceof CloudBlobDirectory) { + CloudBlobDirectory dir = (CloudBlobDirectory) i; + String name = Paths.get(dir.getPrefix()).getFileName().toString(); + if (name.endsWith(".tar")) { + return true; + } + } + } + return false; + } catch (StorageException | URISyntaxException e) { + log.error("Can't check if the segment archives exists", e); + return false; + } + } + + @Override + public JournalFile getJournalFile() { + return new AzureJournalFileV8(segmentstoreDirectory, "journal.log", writeAccessController); + } + + @Override + public GCJournalFile getGCJournalFile() throws IOException { + return new AzureGCJournalFileV8(getAppendBlob("gc.log")); + } + + @Override + public ManifestFile getManifestFile() throws IOException { + return new AzureManifestFileV8(getBlockBlob("manifest")); 
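+        // the manifest lives in a single block blob named "manifest"; AzureManifestFileV8
+        // loads and saves it wholesale as java.util.Properties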
+
+    @Override
+    public RepositoryLock lockRepository() throws IOException {
+        return new AzureRepositoryLockV8(getBlockBlob("repo.lock"), () -> {
+            log.warn("Lost connection to Azure. The client will be closed.");
+            // TODO close the connection
+        }, writeAccessController).lock();
+    }
+
+    private CloudBlockBlob getBlockBlob(String path) throws IOException {
+        try {
+            return segmentstoreDirectory.getBlockBlobReference(path);
+        } catch (URISyntaxException | StorageException e) {
+            throw new IOException(e);
+        }
+    }
+
+    private CloudAppendBlob getAppendBlob(String path) throws IOException {
+        try {
+            return segmentstoreDirectory.getAppendBlobReference(path);
+        } catch (URISyntaxException | StorageException e) {
+            throw new IOException(e);
+        }
+    }
+
+    private static void attachRemoteStoreMonitor(RemoteStoreMonitor remoteStoreMonitor) {
+        // Registers a global listener that reports the duration and outcome of every
+        // completed storage request to the RemoteStoreMonitor.
+        OperationContext.getGlobalRequestCompletedEventHandler().addListener(new StorageEvent<RequestCompletedEvent>() {
+
+            @Override
+            public void eventOccurred(RequestCompletedEvent e) {
+                Date startDate = e.getRequestResult().getStartDate();
+                Date stopDate = e.getRequestResult().getStopDate();
+
+                if (startDate != null && stopDate != null) {
+                    long requestDuration = stopDate.getTime() - startDate.getTime();
+                    remoteStoreMonitor.requestDuration(requestDuration, TimeUnit.MILLISECONDS);
+                }
+
+                Exception exception = e.getRequestResult().getException();
+
+                if (exception == null) {
+                    remoteStoreMonitor.requestCount();
+                } else {
+                    remoteStoreMonitor.requestError();
+                }
+            }
+
+        });
+    }
+
+    public CloudBlobDirectory getSegmentstoreDirectory() {
+        return segmentstoreDirectory;
+    }
+
+    public void setWriteAccessController(WriteAccessController writeAccessController) {
+        this.writeAccessController = writeAccessController;
+    }
+}
diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8.java
new file mode 100644
index 00000000000..c957af7236f
--- /dev/null
+++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.AccessCondition; +import com.microsoft.azure.storage.Constants; +import com.microsoft.azure.storage.RetryNoRetry; +import com.microsoft.azure.storage.StorageErrorCode; +import com.microsoft.azure.storage.StorageErrorCodeStrings; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.BlobRequestOptions; +import com.microsoft.azure.storage.blob.CloudBlockBlob; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.spi.persistence.RepositoryLock; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +public class AzureRepositoryLockV8 implements RepositoryLock { + + private static final Logger log = LoggerFactory.getLogger(AzureRepositoryLockV8.class); + + private static final int TIMEOUT_SEC = Integer.getInteger("oak.segment.azure.lock.timeout", 0); + private static final Integer LEASE_RENEWAL_TIMEOUT_MS = 5000; + + public static final String LEASE_DURATION_PROP = "oak.segment.azure.lock.leaseDurationInSec"; + private final int leaseDuration = Integer.getInteger(LEASE_DURATION_PROP, 60); + + public static final String RENEWAL_INTERVAL_PROP = "oak.segment.azure.lock.leaseRenewalIntervalInSec"; + private final int renewalInterval = Integer.getInteger(RENEWAL_INTERVAL_PROP, 5); + + public static final String TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP = "oak.segment.azure.lock.blockWritesAfterInSec"; + private final int timeToWaitBeforeWriteBlock = Integer.getInteger(TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, 20); + + private final Runnable shutdownHook; + + private final CloudBlockBlob blob; + + private final ExecutorService executor; + + private final int timeoutSec; + + private WriteAccessController writeAccessController; + + private String leaseId; + + private volatile boolean doUpdate; + + public AzureRepositoryLockV8(CloudBlockBlob blob, Runnable shutdownHook, WriteAccessController writeAccessController) { + this(blob, shutdownHook, writeAccessController, TIMEOUT_SEC); + } + + public AzureRepositoryLockV8(CloudBlockBlob blob, Runnable shutdownHook, WriteAccessController writeAccessController, int timeoutSec) { + this.shutdownHook = shutdownHook; + this.blob = blob; + this.executor = Executors.newSingleThreadExecutor(); + this.timeoutSec = timeoutSec; + this.writeAccessController = writeAccessController; + + if (leaseDuration < timeToWaitBeforeWriteBlock || timeToWaitBeforeWriteBlock < renewalInterval) { + throw new IllegalStateException(String.format("The value of %s must be greater than %s and the value of %s must be greater than %s", + LEASE_DURATION_PROP, TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, RENEWAL_INTERVAL_PROP)); + } + } + + public AzureRepositoryLockV8 lock() throws IOException { + long start = System.currentTimeMillis(); + Exception ex = null; + do { + try { + blob.openOutputStream().close(); + + log.info("{} = {}", LEASE_DURATION_PROP, leaseDuration); + log.info("{} = {}", RENEWAL_INTERVAL_PROP, renewalInterval); + log.info("{} = {}", TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, timeToWaitBeforeWriteBlock); + + leaseId = blob.acquireLease(leaseDuration, null); + writeAccessController.enableWriting(); + log.info("Acquired lease {}", leaseId); + } catch (Exception e) { + if (ex == null) { 
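+                    // Only the first failure is logged; subsequent attempts retry
+                    // quietly once per second until the configured timeout elapses.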
+ log.info("Can't acquire the lease. Retrying every 1s. Timeout is set to {}s.", timeoutSec); + } + ex = e; + if ((System.currentTimeMillis() - start) / 1000 < timeoutSec) { + try { + Thread.sleep(1000); + } catch (InterruptedException e1) { + throw new IOException(e1); + } + } else { + break; + } + } + } while (leaseId == null); + if (leaseId == null) { + log.error("Can't acquire the lease in {}s.", timeoutSec); + throw new IOException(ex); + } else { + executor.submit(this::refreshLease); + return this; + } + } + + private void refreshLease() { + doUpdate = true; + long lastUpdate = 0; + while (doUpdate) { + long timeSinceLastUpdate = (System.currentTimeMillis() - lastUpdate) / 1000; + try { + if (timeSinceLastUpdate > renewalInterval) { + + BlobRequestOptions requestOptions = new BlobRequestOptions(); + requestOptions.setMaximumExecutionTimeInMs(LEASE_RENEWAL_TIMEOUT_MS); + requestOptions.setRetryPolicyFactory(new RetryNoRetry()); + blob.renewLease(AccessCondition.generateLeaseCondition(leaseId), requestOptions, null); + + writeAccessController.enableWriting(); + lastUpdate = System.currentTimeMillis(); + } + } catch (Exception e) { + timeSinceLastUpdate = (System.currentTimeMillis() - lastUpdate) / 1000; + + if (timeSinceLastUpdate > timeToWaitBeforeWriteBlock) { + writeAccessController.disableWriting(); + } + + if (e instanceof StorageException) { + StorageException storageException = (StorageException) e; + if (Set.of(StorageErrorCodeStrings.OPERATION_TIMED_OUT + , StorageErrorCode.SERVICE_INTERNAL_ERROR + , StorageErrorCodeStrings.SERVER_BUSY + , StorageErrorCodeStrings.INTERNAL_ERROR).contains(storageException.getErrorCode())) { + log.warn("Could not renew the lease due to the operation timeout or service unavailability. Retry in progress ...", e); + } else if (storageException.getHttpStatusCode() == Constants.HeaderConstants.HTTP_UNUSED_306) { + log.warn("Client side error. Retry in progress ...", e); + } else { + log.warn("Could not renew lease due to storage exception. Retry in progress ... ", e); + } + } else { + log.error("Can't renew the lease", e); + shutdownHook.run(); + doUpdate = false; + return; + } + } + try { + Thread.sleep(100); + } catch (InterruptedException e) { + log.error("Interrupted the lease renewal loop", e); + } + } + } + + @Override + public void unlock() throws IOException { + doUpdate = false; + executor.shutdown(); + try { + executor.awaitTermination(1, TimeUnit.MINUTES); + } catch (InterruptedException e) { + throw new IOException(e); + } finally { + releaseLease(); + } + } + + private void releaseLease() throws IOException { + try { + blob.releaseLease(AccessCondition.generateLeaseCondition(leaseId)); + blob.delete(); + log.info("Released lease {}", leaseId); + leaseId = null; + } catch (StorageException e) { + throw new IOException(e); + } + } +} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveReaderV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveReaderV8.java new file mode 100644 index 00000000000..de69711b6e0 --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveReaderV8.java @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.segment.azure.v8;
+
+import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.readBufferFully;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.util.Map;
+import java.util.UUID;
+
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.blob.CloudBlobDirectory;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+
+import org.apache.jackrabbit.oak.commons.Buffer;
+import org.apache.jackrabbit.oak.segment.azure.AzureBlobMetadata;
+import org.apache.jackrabbit.oak.segment.remote.AbstractRemoteSegmentArchiveReader;
+import org.apache.jackrabbit.oak.segment.remote.RemoteSegmentArchiveEntry;
+import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor;
+
+public class AzureSegmentArchiveReaderV8 extends AbstractRemoteSegmentArchiveReader {
+
+    private final CloudBlobDirectory archiveDirectory;
+
+    private final long length;
+
+    protected AzureSegmentArchiveReaderV8(CloudBlobDirectory archiveDirectory, IOMonitor ioMonitor) throws IOException {
+        super(ioMonitor);
+        this.archiveDirectory = archiveDirectory;
+        this.length = computeArchiveIndexAndLength();
+    }
+
+    @Override
+    public long length() {
+        return length;
+    }
+
+    @Override
+    public String getName() {
+        return AzureUtilitiesV8.getName(archiveDirectory);
+    }
+
+    @Override
+    protected long computeArchiveIndexAndLength() throws IOException {
+        long length = 0;
+        for (CloudBlob blob : AzureUtilitiesV8.getBlobs(archiveDirectory)) {
+            Map<String, String> metadata = blob.getMetadata();
+            if (AzureBlobMetadata.isSegment(metadata)) {
+                RemoteSegmentArchiveEntry indexEntry = AzureBlobMetadata.toIndexEntry(metadata, (int) blob.getProperties().getLength());
+                index.put(new UUID(indexEntry.getMsb(), indexEntry.getLsb()), indexEntry);
+            }
+            length += blob.getProperties().getLength();
+        }
+
+        return length;
+    }
+
+    @Override
+    protected void doReadSegmentToBuffer(String segmentFileName, Buffer buffer) throws IOException {
+        readBufferFully(getBlob(segmentFileName), buffer);
+    }
+
+    @Override
+    protected Buffer doReadDataFile(String extension) throws IOException {
+        return readBlob(getName() + extension);
+    }
+
+    @Override
+    protected File archivePathAsFile() {
+        return new File(archiveDirectory.getUri().getPath());
+    }
+
+    private CloudBlockBlob getBlob(String name) throws IOException {
+        try {
+            return archiveDirectory.getBlockBlobReference(name);
+        } catch (URISyntaxException | StorageException e) {
+            throw new IOException(e);
+        }
+    }
+
+    private Buffer readBlob(String name) throws IOException {
+        try {
+            CloudBlockBlob blob = getBlob(name);
+            if (!blob.exists()) {
+                return null;
+            }
+            long length = blob.getProperties().getLength();
+            Buffer buffer = Buffer.allocate((int) length);
+            AzureUtilitiesV8.readBufferFully(blob, buffer);
+            return buffer;
+        } catch (StorageException e) {
+            throw new
IOException(e); + } + } +} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8.java new file mode 100644 index 00000000000..21ecfe8d769 --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.readBufferFully; +import static org.apache.jackrabbit.oak.segment.remote.RemoteUtilities.getSegmentFileName; +import static org.apache.jackrabbit.oak.segment.remote.RemoteUtilities.OFF_HEAP; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.concurrent.TimeUnit; + +import com.microsoft.azure.storage.blob.BlobRequestOptions; +import org.apache.jackrabbit.guava.common.base.Stopwatch; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlobDirectory; +import com.microsoft.azure.storage.blob.CloudBlockBlob; + +import org.apache.jackrabbit.oak.commons.Buffer; +import org.apache.jackrabbit.oak.segment.azure.AzureBlobMetadata; +import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptionsV8; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.azure.util.Retrier; +import org.apache.jackrabbit.oak.segment.remote.AbstractRemoteSegmentArchiveWriter; +import org.apache.jackrabbit.oak.segment.remote.RemoteSegmentArchiveEntry; +import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor; +import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; + +public class AzureSegmentArchiveWriterV8 extends AbstractRemoteSegmentArchiveWriter { + + private final CloudBlobDirectory archiveDirectory; + + private final Retrier retrier = Retrier.withParams( + Integer.getInteger("azure.segment.archive.writer.retries.max", 16), + Integer.getInteger("azure.segment.archive.writer.retries.intervalMs", 5000) + ); + + private final BlobRequestOptions writeOptimisedBlobRequestOptions; + + public AzureSegmentArchiveWriterV8(CloudBlobDirectory archiveDirectory, IOMonitor ioMonitor, FileStoreMonitor monitor, WriteAccessController writeAccessController) { + super(ioMonitor, monitor); + this.archiveDirectory = archiveDirectory; + this.writeAccessController = writeAccessController; + this.writeOptimisedBlobRequestOptions = AzureRequestOptionsV8.optimiseForWriteOperations(archiveDirectory.getServiceClient().getDefaultRequestOptions()); + } + + @Override + public String 
getName() { + return AzureUtilitiesV8.getName(archiveDirectory); + } + + @Override + protected void doWriteArchiveEntry(RemoteSegmentArchiveEntry indexEntry, byte[] data, int offset, int size) throws IOException { + + writeAccessController.checkWritingAllowed(); + + long msb = indexEntry.getMsb(); + long lsb = indexEntry.getLsb(); + String segmentName = getSegmentFileName(indexEntry); + CloudBlockBlob blob = getBlob(segmentName); + ioMonitor.beforeSegmentWrite(new File(blob.getName()), msb, lsb, size); + Stopwatch stopwatch = Stopwatch.createStarted(); + try { + blob.setMetadata(AzureBlobMetadata.toSegmentMetadata(indexEntry)); + blob.uploadFromByteArray(data, offset, size, null, writeOptimisedBlobRequestOptions, null); + blob.uploadMetadata(null, writeOptimisedBlobRequestOptions, null); + } catch (StorageException e) { + throw new IOException(e); + } + ioMonitor.afterSegmentWrite(new File(blob.getName()), msb, lsb, size, stopwatch.elapsed(TimeUnit.NANOSECONDS)); + } + + @Override + protected Buffer doReadArchiveEntry(RemoteSegmentArchiveEntry indexEntry) throws IOException { + Buffer buffer; + if (OFF_HEAP) { + buffer = Buffer.allocateDirect(indexEntry.getLength()); + } else { + buffer = Buffer.allocate(indexEntry.getLength()); + } + readBufferFully(getBlob(getSegmentFileName(indexEntry)), buffer); + return buffer; + } + + @Override + protected void doWriteDataFile(byte[] data, String extension) throws IOException { + retrier.execute(() -> { + try { + writeAccessController.checkWritingAllowed(); + + getBlob(getName() + extension).uploadFromByteArray(data, 0, data.length, null, writeOptimisedBlobRequestOptions, null); + } catch (StorageException e) { + throw new IOException(e); + } + }); + } + + @Override + protected void afterQueueClosed() throws IOException { + retrier.execute(() -> { + try { + writeAccessController.checkWritingAllowed(); + + getBlob("closed").uploadFromByteArray(new byte[0], 0, 0, null, writeOptimisedBlobRequestOptions, null); + } catch (StorageException e) { + throw new IOException(e); + } + }); + } + + @Override + protected void afterQueueFlushed() { + // do nothing + } + + private CloudBlockBlob getBlob(String name) throws IOException { + try { + return archiveDirectory.getBlockBlobReference(name); + } catch (URISyntaxException | StorageException e) { + throw new IOException(e); + } + } +} diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreServiceV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreServiceV8.java new file mode 100644 index 00000000000..144e0e643fd --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreServiceV8.java @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.segment.azure.v8;
+
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.LocationMode;
+import com.microsoft.azure.storage.StorageCredentials;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
+import com.microsoft.azure.storage.blob.CloudBlobClient;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.jackrabbit.oak.segment.azure.Configuration;
+import org.jetbrains.annotations.NotNull;
+import org.osgi.framework.ServiceRegistration;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.ConfigurationPolicy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.security.InvalidKeyException;
+
+@Component(
+        configurationPolicy = ConfigurationPolicy.REQUIRE,
+        configurationPid = {Configuration.PID})
+public class AzureSegmentStoreServiceV8 {
+
+    private static final Logger log = LoggerFactory.getLogger(AzureSegmentStoreServiceV8.class);
+
+    public static final String DEFAULT_CONTAINER_NAME = "oak";
+
+    public static final String DEFAULT_ROOT_PATH = "/oak";
+
+    public static final boolean DEFAULT_ENABLE_SECONDARY_LOCATION = false;
+    public static final String DEFAULT_ENDPOINT_SUFFIX = "core.windows.net";
+
+    private ServiceRegistration registration;
+    private static AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8;
+
+    public static AzurePersistenceV8 createAzurePersistenceFrom(Configuration configuration) throws IOException {
+        if (!StringUtils.isBlank(configuration.connectionURL())) {
+            return createPersistenceFromConnectionURL(configuration);
+        }
+        if (!StringUtils.isAnyBlank(configuration.clientId(), configuration.clientSecret(), configuration.tenantId())) {
+            return createPersistenceFromServicePrincipalCredentials(configuration);
+        }
+        if (!StringUtils.isBlank(configuration.sharedAccessSignature())) {
+            return createPersistenceFromSasUri(configuration);
+        }
+        return createPersistenceFromAccessKey(configuration);
+    }
+
+    private static AzurePersistenceV8 createPersistenceFromAccessKey(Configuration configuration) throws IOException {
+        StringBuilder connectionString = new StringBuilder();
+        connectionString.append("DefaultEndpointsProtocol=https;");
+        connectionString.append("AccountName=").append(configuration.accountName()).append(';');
+        connectionString.append("AccountKey=").append(configuration.accessKey()).append(';');
+        if (!StringUtils.isBlank(configuration.blobEndpoint())) {
+            connectionString.append("BlobEndpoint=").append(configuration.blobEndpoint()).append(';');
+        }
+
+        return createAzurePersistence(connectionString.toString(), configuration, true);
+    }
+
+    private static AzurePersistenceV8 createPersistenceFromSasUri(Configuration configuration) throws IOException {
+        StringBuilder connectionString = new StringBuilder();
+        connectionString.append("DefaultEndpointsProtocol=https;");
+
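+        // The shared access signature replaces the account key in the connection
+        // string; the container is assumed to exist already, since this path calls
+        // createAzurePersistence(..., createContainer = false) below.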
connectionString.append("AccountName=").append(configuration.accountName()).append(';'); + connectionString.append("SharedAccessSignature=").append(configuration.sharedAccessSignature()).append(';'); + if (!StringUtils.isBlank(configuration.blobEndpoint())) { + connectionString.append("BlobEndpoint=").append(configuration.blobEndpoint()).append(';'); + } + return createAzurePersistence(connectionString.toString(), configuration, false); + } + + @NotNull + private static AzurePersistenceV8 createPersistenceFromConnectionURL(Configuration configuration) throws IOException { + return createAzurePersistence(configuration.connectionURL(), configuration, true); + } + + @NotNull + private static AzurePersistenceV8 createPersistenceFromServicePrincipalCredentials(Configuration configuration) throws IOException { + azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); + StorageCredentials storageCredentialsToken = azureStorageCredentialManagerV8.getStorageCredentialAccessTokenFromServicePrincipals(configuration.accountName(), configuration.clientId(), configuration.clientSecret(), configuration.tenantId()); + + try { + CloudStorageAccount cloud = new CloudStorageAccount(storageCredentialsToken, true, DEFAULT_ENDPOINT_SUFFIX, configuration.accountName()); + return createAzurePersistence(cloud, configuration, true); + } catch (StorageException | URISyntaxException e) { + throw new IOException(e); + } + } + + @NotNull + private static AzurePersistenceV8 createAzurePersistence(String connectionString, Configuration configuration, boolean createContainer) throws IOException { + try { + CloudStorageAccount cloud = CloudStorageAccount.parse(connectionString); + log.info("Connection string: '{}'", cloud); + return createAzurePersistence(cloud, configuration, createContainer); + } catch (StorageException | URISyntaxException | InvalidKeyException e) { + throw new IOException(e); + } + } + + @NotNull + private static AzurePersistenceV8 createAzurePersistence(CloudStorageAccount cloud, Configuration configuration, boolean createContainer) throws URISyntaxException, StorageException { + CloudBlobClient cloudBlobClient = cloud.createCloudBlobClient(); + BlobRequestOptions blobRequestOptions = new BlobRequestOptions(); + + if (configuration.enableSecondaryLocation()) { + blobRequestOptions.setLocationMode(LocationMode.PRIMARY_THEN_SECONDARY); + } + cloudBlobClient.setDefaultRequestOptions(blobRequestOptions); + + CloudBlobContainer container = cloudBlobClient.getContainerReference(configuration.containerName()); + if (createContainer && !container.exists()) { + container.create(); + } + String path = normalizePath(configuration.rootPath()); + return new AzurePersistenceV8(container.getDirectoryReference(path)); + } + + @NotNull + private static String normalizePath(@NotNull String rootPath) { + if (rootPath.length() > 0 && rootPath.charAt(0) == '/') { + return rootPath.substring(1); + } + return rootPath; + } + +} \ No newline at end of file diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureStorageCredentialManager.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureStorageCredentialManagerV8.java similarity index 93% rename from oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureStorageCredentialManager.java rename to oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureStorageCredentialManagerV8.java index 4f962f10809..6c92481979c 100644 --- 
a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureStorageCredentialManager.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureStorageCredentialManagerV8.java @@ -14,7 +14,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.jackrabbit.oak.segment.azure; +package org.apache.jackrabbit.oak.segment.azure.v8; import com.azure.core.credential.AccessToken; import com.azure.core.credential.TokenRequestContext; @@ -39,13 +39,13 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_ID; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_SECRET; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_SECRET_KEY; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_TENANT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_SECRET; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_SECRET_KEY; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_TENANT_ID; -public class AzureStorageCredentialManager implements Closeable { - private static final Logger log = LoggerFactory.getLogger(AzureStorageCredentialManager.class); +public class AzureStorageCredentialManagerV8 implements Closeable { + private static final Logger log = LoggerFactory.getLogger(AzureStorageCredentialManagerV8.class); private static final String AZURE_DEFAULT_SCOPE = "https://storage.azure.com/.default"; private static final long TOKEN_REFRESHER_INITIAL_DELAY = 45L; private static final long TOKEN_REFRESHER_DELAY = 1L; diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureUtilitiesV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureUtilitiesV8.java new file mode 100644 index 00000000000..382eef83ac2 --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureUtilitiesV8.java @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.jackrabbit.oak.segment.azure.v8;
+
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.ResultContinuation;
+import com.microsoft.azure.storage.ResultSegment;
+import com.microsoft.azure.storage.StorageCredentials;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.StorageUri;
+import com.microsoft.azure.storage.blob.BlobListingDetails;
+import com.microsoft.azure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlobDirectory;
+import com.microsoft.azure.storage.blob.LeaseStatus;
+import com.microsoft.azure.storage.blob.ListBlobItem;
+import org.apache.jackrabbit.oak.commons.Buffer;
+import org.apache.jackrabbit.oak.segment.spi.RepositoryNotReachableException;
+import org.jetbrains.annotations.NotNull;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.file.Paths;
+import java.security.InvalidKeyException;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+
+public final class AzureUtilitiesV8 {
+
+    public static final String AZURE_ACCOUNT_NAME = "AZURE_ACCOUNT_NAME";
+    public static final String AZURE_SECRET_KEY = "AZURE_SECRET_KEY";
+    public static final String AZURE_TENANT_ID = "AZURE_TENANT_ID";
+    public static final String AZURE_CLIENT_ID = "AZURE_CLIENT_ID";
+    public static final String AZURE_CLIENT_SECRET = "AZURE_CLIENT_SECRET";
+
+    private static final Logger log = LoggerFactory.getLogger(AzureUtilitiesV8.class);
+
+    private AzureUtilitiesV8() {
+    }
+
+    public static String getName(CloudBlob blob) {
+        return Paths.get(blob.getName()).getFileName().toString();
+    }
+
+    public static String getName(CloudBlobDirectory directory) {
+        return Paths.get(directory.getUri().getPath()).getFileName().toString();
+    }
+
+    public static List<CloudBlob> getBlobs(CloudBlobDirectory directory) throws IOException {
+        List<CloudBlob> blobList = new ArrayList<>();
+        ResultContinuation token = null;
+        do {
+            ResultSegment<ListBlobItem> result = listBlobsInSegments(directory, token); //get the blobs in pages of 5000
+            for (ListBlobItem b : result.getResults()) { //add resultant blobs to list
+                if (b instanceof CloudBlob) {
+                    CloudBlob cloudBlob = (CloudBlob) b;
+                    blobList.add(cloudBlob);
+                }
+            }
+            token = result.getContinuationToken();
+        } while (token != null);
+        return blobList;
+    }
+
+    public static void readBufferFully(CloudBlob blob, Buffer buffer) throws IOException {
+        try {
+            blob.download(new ByteBufferOutputStream(buffer));
+            buffer.flip();
+        } catch (StorageException e) {
+            if (e.getHttpStatusCode() == 404) {
+                log.error("Blob not found in the remote repository: {}", blob.getName());
+                throw new FileNotFoundException("Blob not found in the remote repository: " + blob.getName());
+            }
+            throw new RepositoryNotReachableException(e);
+        }
+    }
+
+    public static void deleteAllEntries(CloudBlobDirectory directory) throws IOException {
+        getBlobs(directory).forEach(b -> {
+            try {
+                b.deleteIfExists();
+            } catch (StorageException e) {
+                log.error("Can't delete blob {}", b.getUri().getPath(), e);
+            }
+        });
+    }
+
+    public static CloudBlobDirectory cloudBlobDirectoryFrom(StorageCredentials credentials,
+                                                            String uri, String dir) throws URISyntaxException, StorageException {
+        StorageUri storageUri = new StorageUri(new URI(uri));
+        CloudBlobContainer container = new CloudBlobContainer(storageUri, credentials);
+
+        container.createIfNotExists();
+
+        return container.getDirectoryReference(dir);
+    }
+
+    public static CloudBlobDirectory cloudBlobDirectoryFrom(String connection, String containerName,
+                                                            String dir) throws InvalidKeyException, URISyntaxException, StorageException {
+        CloudStorageAccount cloud = CloudStorageAccount.parse(connection);
+        CloudBlobContainer container = cloud.createCloudBlobClient().getContainerReference(containerName);
+        container.createIfNotExists();
+
+        return container.getDirectoryReference(dir);
+    }
+
+    private static ResultSegment<ListBlobItem> listBlobsInSegments(CloudBlobDirectory directory,
+                                                                   ResultContinuation token) throws IOException {
+        ResultSegment<ListBlobItem> result = null;
+        IOException lastException = null;
+        for (int sleep = 10; sleep <= 10000; sleep *= 10) { //increment the sleep time in steps.
+            try {
+                result = directory.listBlobsSegmented(
+                        null,
+                        false,
+                        EnumSet.of(BlobListingDetails.METADATA),
+                        5000,
+                        token,
+                        null,
+                        null);
+                break; //we have the results, no need to retry
+            } catch (StorageException | URISyntaxException e) {
+                lastException = new IOException(e);
+                try {
+                    Thread.sleep(sleep); //Sleep and retry
+                } catch (InterruptedException ex) {
+                    log.warn("Interrupted", ex);
+                }
+            }
+        }
+
+        if (result == null) {
+            throw lastException;
+        } else {
+            return result;
+        }
+    }
+
+    public static void deleteAllBlobs(@NotNull CloudBlobDirectory directory) throws URISyntaxException, StorageException, InterruptedException {
+        for (ListBlobItem blobItem : directory.listBlobs()) {
+            if (blobItem instanceof CloudBlob) {
+                CloudBlob cloudBlob = (CloudBlob) blobItem;
+                if (cloudBlob.getProperties().getLeaseStatus() == LeaseStatus.LOCKED) {
+                    cloudBlob.breakLease(0);
+                }
+                cloudBlob.deleteIfExists();
+            } else if (blobItem instanceof CloudBlobDirectory) {
+                CloudBlobDirectory cloudBlobDirectory = (CloudBlobDirectory) blobItem;
+                deleteAllBlobs(cloudBlobDirectory);
+            }
+        }
+    }
+
+    private static class ByteBufferOutputStream extends OutputStream {
+
+        @NotNull
+        private final Buffer buffer;
+
+        public ByteBufferOutputStream(@NotNull Buffer buffer) {
+            this.buffer = buffer;
+        }
+
+        @Override
+        public void write(int b) {
+            buffer.put((byte) b);
+        }
+
+        @Override
+        public void write(@NotNull byte[] bytes, int offset, int length) {
+            buffer.put(bytes, offset, length);
+        }
+    }
+
+}
+
+
diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/ReverseFileReaderV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/ReverseFileReaderV8.java
new file mode 100644
index 00000000000..b81e28588b6
--- /dev/null
+++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/ReverseFileReaderV8.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.segment.azure.v8;
+
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.CloudBlob;
+
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+
+import static java.lang.Math.min;
+
+public class ReverseFileReaderV8 {
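+
+    // Reads the blob backwards in fixed-size blocks, so that the lines at the end
+    // of the file (e.g. the most recent journal entries) can be returned first.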
+
+    private static final int BUFFER_SIZE = 16 * 1024;
+
+    private int bufferSize;
+
+    private final CloudBlob blob;
+
+    private byte[] buffer;
+
+    private int bufferOffset;
+
+    private int fileOffset;
+
+    public ReverseFileReaderV8(CloudBlob blob) throws StorageException {
+        this(blob, BUFFER_SIZE);
+    }
+
+    public ReverseFileReaderV8(CloudBlob blob, int bufferSize) throws StorageException {
+        this.blob = blob;
+        if (blob.exists()) {
+            this.fileOffset = (int) blob.getProperties().getLength();
+        } else {
+            this.fileOffset = 0;
+        }
+        this.bufferSize = bufferSize;
+    }
+
+    private void readBlock() throws IOException {
+        if (buffer == null) {
+            buffer = new byte[min(fileOffset, bufferSize)];
+        } else if (fileOffset < buffer.length) {
+            buffer = new byte[fileOffset];
+        }
+
+        if (buffer.length > 0) {
+            fileOffset -= buffer.length;
+            try {
+                OperationContext opContext = new OperationContext();
+                HashMap<String, String> userHeaders = new HashMap<>();
+                userHeaders.put("If-Match", "*");
+                opContext.setUserHeaders(userHeaders);
+                blob.downloadRangeToByteArray(fileOffset, Long.valueOf(buffer.length), buffer, 0, null, null, opContext);
+            } catch (StorageException e) {
+                throw new IOException(e);
+            }
+        }
+        bufferOffset = buffer.length;
+    }
+
+    private String readUntilNewLine() {
+        if (bufferOffset == -1) {
+            return "";
+        }
+        int stop = bufferOffset;
+        while (--bufferOffset >= 0) {
+            if (buffer[bufferOffset] == '\n') {
+                break;
+            }
+        }
+        // bufferOffset points either to the previous '\n' character or to -1
+        return new String(buffer, bufferOffset + 1, stop - bufferOffset - 1, Charset.defaultCharset());
+    }
+
+    public String readLine() throws IOException {
+        if (bufferOffset == -1 && fileOffset == 0) {
+            return null;
+        }
+
+        if (buffer == null) {
+            readBlock();
+        }
+
+        List<String> result = new ArrayList<>(1);
+        while (true) {
+            result.add(readUntilNewLine());
+            if (bufferOffset > -1) { // stopped on the '\n'
+                break;
+            }
+            if (fileOffset == 0) { // reached the beginning of the file
+                break;
+            }
+            readBlock();
+        }
+        Collections.reverse(result);
+        return String.join("", result);
+    }
+}
diff --git a/oak-segment-azure/src/test/java/oak/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyAzureServicePrincipalToTarTest.java b/oak-segment-azure/src/test/java/oak/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyAzureServicePrincipalToTarTest.java
index 16c5052670e..e85e4c941b7 100644
--- a/oak-segment-azure/src/test/java/oak/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyAzureServicePrincipalToTarTest.java
+++ b/oak-segment-azure/src/test/java/oak/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyAzureServicePrincipalToTarTest.java
@@ -19,17 +19,17 @@ package oak.apache.jackrabbit.oak.segment.azure.tool;
 
 import com.microsoft.azure.storage.blob.CloudBlobDirectory;
-import org.apache.jackrabbit.oak.segment.azure.AzurePersistence;
-import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager;
+import
org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; import org.junit.Test; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_ACCOUNT_NAME; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_ID; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_SECRET; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_TENANT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_ACCOUNT_NAME; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_SECRET; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_TENANT_ID; import static org.junit.Assume.assumeNotNull; public class SegmentCopyAzureServicePrincipalToTarTest extends SegmentCopyTestBase { @@ -54,10 +54,10 @@ protected SegmentNodeStorePersistence getSrcPersistence() { String accountName = ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME); String path = String.format(SEGMENT_STORE_PATH_FORMAT, accountName, CONTAINER_NAME, DIR); CloudBlobDirectory cloudBlobDirectory; - try (AzureStorageCredentialManager azureStorageCredentialManager = new AzureStorageCredentialManager()) { - cloudBlobDirectory = ToolUtils.createCloudBlobDirectory(path, ENVIRONMENT, azureStorageCredentialManager); + try (AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8()) { + cloudBlobDirectory = ToolUtils.createCloudBlobDirectory(path, ENVIRONMENT, azureStorageCredentialManagerV8); } - return new AzurePersistence(cloudBlobDirectory); + return new AzurePersistenceV8(cloudBlobDirectory); } @Override diff --git a/oak-segment-azure/src/test/java/oak/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyTestBase.java b/oak-segment-azure/src/test/java/oak/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyTestBase.java index edad03847fb..2913f250aa1 100644 --- a/oak-segment-azure/src/test/java/oak/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyTestBase.java +++ b/oak-segment-azure/src/test/java/oak/apache/jackrabbit/oak/segment/azure/tool/SegmentCopyTestBase.java @@ -49,7 +49,7 @@ import org.apache.jackrabbit.oak.segment.SegmentCache; import org.apache.jackrabbit.oak.segment.SegmentNodeStore; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; import org.apache.jackrabbit.oak.segment.azure.tool.SegmentCopy; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils.SegmentStoreType; import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.CompactorType; @@ -235,7 +235,7 @@ protected SegmentNodeStorePersistence getTarPersistence() { } protected SegmentNodeStorePersistence getAzurePersistence() throws Exception { - return new AzurePersistence(azurite.getContainer(AZURE_CONTAINER).getDirectoryReference(AZURE_DIRECTORY)); + return new AzurePersistenceV8(azurite.getContainer(AZURE_CONTAINER).getDirectoryReference(AZURE_DIRECTORY)); } protected String getTarPersistencePathOrUri() { diff 
--git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java index fd1266905e3..ab9a1d3b66d 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java @@ -16,17 +16,16 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageErrorCodeStrings; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlockBlob; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.CloudBlob; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.ListBlobItem; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.ListBlobsOptions; +import com.azure.storage.blob.specialized.BlobLeaseClient; +import com.azure.storage.blob.specialized.BlobLeaseClientBuilder; +import com.azure.storage.blob.specialized.BlockBlobClient; import org.apache.jackrabbit.oak.api.CommitFailedException; import org.apache.jackrabbit.oak.api.PropertyState; import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; import org.apache.jackrabbit.oak.commons.Buffer; import org.apache.jackrabbit.oak.segment.SegmentId; import org.apache.jackrabbit.oak.segment.SegmentNodeStore; @@ -63,11 +62,7 @@ import java.io.IOException; import java.net.URISyntaxException; import java.security.InvalidKeyException; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.UUID; +import java.util.*; import java.util.concurrent.TimeoutException; import static org.apache.jackrabbit.guava.common.collect.Lists.newArrayList; @@ -75,12 +70,7 @@ import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.IsNot.not; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.Assert.*; public class AzureArchiveManagerTest { @@ -90,17 +80,19 @@ public class AzureArchiveManagerTest { @Rule public TemporaryFolder folder = new TemporaryFolder(new File("target")); - private CloudBlobContainer container; + private BlobContainerClient readBlobContainerClient; + private BlobContainerClient writeBlobContainerClient; private AzurePersistence azurePersistence; @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); + public void setup() throws BlobStorageException, InvalidKeyException, URISyntaxException { + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); - azurePersistence = new 
AzurePersistence(container.getDirectoryReference("oak")); + azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); azurePersistence.setWriteAccessController(writeAccessController); } @@ -110,7 +102,7 @@ public void setup() throws StorageException, InvalidKeyException, URISyntaxExcep .and(AzureRepositoryLock.TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, "9"); @Test - public void testRecovery() throws StorageException, URISyntaxException, IOException { + public void testRecovery() throws BlobStorageException, IOException { SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); SegmentArchiveWriter writer = manager.create("data00000a.tar"); @@ -124,7 +116,7 @@ public void testRecovery() throws StorageException, URISyntaxException, IOExcept writer.flush(); writer.close(); - container.getBlockBlobReference("oak/data00000a.tar/0005." + uuids.get(5).toString()).delete(); + readBlobContainerClient.getBlobClient("oak/data00000a.tar/0005." + uuids.get(5).toString()).delete(); LinkedHashMap recovered = new LinkedHashMap<>(); manager.recoverEntries("data00000a.tar", recovered); @@ -132,7 +124,7 @@ public void testRecovery() throws StorageException, URISyntaxException, IOExcept } @Test - public void testBackupWithRecoveredEntries() throws StorageException, URISyntaxException, IOException { + public void testBackupWithRecoveredEntries() throws BlobStorageException, IOException { SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); SegmentArchiveWriter writer = manager.create("data00000a.tar"); @@ -146,7 +138,7 @@ public void testBackupWithRecoveredEntries() throws StorageException, URISyntaxE writer.flush(); writer.close(); - container.getBlockBlobReference("oak/data00000a.tar/0005." + uuids.get(5).toString()).delete(); + readBlobContainerClient.getBlobClient("oak/data00000a.tar/0005." + uuids.get(5).toString()).delete(); LinkedHashMap recovered = new LinkedHashMap<>(); manager.recoverEntries("data00000a.tar", recovered); @@ -154,17 +146,17 @@ public void testBackupWithRecoveredEntries() throws StorageException, URISyntaxE manager.backup("data00000a.tar", "data00000a.tar.bak", recovered.keySet()); for (int i = 0; i <= 4; i++) { - assertTrue(container.getBlockBlobReference("oak/data00000a.tar/000"+ i + "." + uuids.get(i)).exists()); + assertTrue(readBlobContainerClient.getBlobClient("oak/data00000a.tar/000"+ i + "." + uuids.get(i)).exists()); } for (int i = 5; i <= 9; i++) { - assertFalse(String.format("Segment %s.??? should have been deleted.", "oak/data00000a.tar/000"+ i), container.getBlockBlobReference("oak/data00000a.tar/000"+ i + "." + uuids.get(i)).exists()); + assertFalse(String.format("Segment %s.??? should have been deleted.", "oak/data00000a.tar/000"+ i), readBlobContainerClient.getBlobClient("oak/data00000a.tar/000"+ i + "." 
+ uuids.get(i)).exists()); } } @Test - public void testUncleanStop() throws URISyntaxException, IOException, InvalidFileStoreVersionException, CommitFailedException, StorageException { - AzurePersistence p = new AzurePersistence(container.getDirectoryReference("oak")); + public void testUncleanStop() throws IOException, InvalidFileStoreVersionException, CommitFailedException, BlobStorageException { + AzurePersistence p = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -172,9 +164,10 @@ public void testUncleanStop() throws URISyntaxException, IOException, InvalidFil segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); fs.close(); - container.getBlockBlobReference("oak/data00000a.tar/closed").delete(); - container.getBlockBlobReference("oak/data00000a.tar/data00000a.tar.brf").delete(); - container.getBlockBlobReference("oak/data00000a.tar/data00000a.tar.gph").delete(); + + readBlobContainerClient.getBlobClient("oak/data00000a.tar/closed").delete(); + readBlobContainerClient.getBlobClient("oak/data00000a.tar/data00000a.tar.brf").delete(); + readBlobContainerClient.getBlobClient("oak/data00000a.tar/data00000a.tar.gph").delete(); fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); @@ -184,8 +177,8 @@ public void testUncleanStop() throws URISyntaxException, IOException, InvalidFil @Test // see OAK-8566 - public void testUncleanStopWithEmptyArchive() throws URISyntaxException, IOException, InvalidFileStoreVersionException, CommitFailedException, StorageException { - AzurePersistence p = new AzurePersistence(container.getDirectoryReference("oak")); + public void testUncleanStopWithEmptyArchive() throws IOException, InvalidFileStoreVersionException, CommitFailedException, BlobStorageException { + AzurePersistence p = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -202,9 +195,12 @@ public void testUncleanStopWithEmptyArchive() throws URISyntaxException, IOExcep fs.close(); // remove the segment 0000 from the second archive - ListBlobItem segment0000 = container.listBlobs("oak/data00001a.tar/0000.").iterator().next(); - ((CloudBlob) segment0000).delete(); - container.getBlockBlobReference("oak/data00001a.tar/closed").delete(); + ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); + listBlobsOptions.setPrefix("oak/data00001a.tar/0000."); + + BlobItem blobItem = readBlobContainerClient.listBlobs(listBlobsOptions, null).iterator().next(); + readBlobContainerClient.getBlobClient(blobItem.getName()).delete(); + readBlobContainerClient.getBlobClient("oak/data00001a.tar/closed").delete(); fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); @@ -213,8 +209,8 @@ public void testUncleanStopWithEmptyArchive() throws URISyntaxException, IOExcep } @Test - public void testUncleanStopSegmentMissing() throws URISyntaxException, 
IOException, InvalidFileStoreVersionException, CommitFailedException, StorageException { - AzurePersistence p = new AzurePersistence(container.getDirectoryReference("oak")); + public void testUncleanStopSegmentMissing() throws IOException, InvalidFileStoreVersionException, CommitFailedException, BlobStorageException { + AzurePersistence p = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -244,23 +240,31 @@ public void testUncleanStopSegmentMissing() throws URISyntaxException, IOExcepti fs.close(); // remove the segment 0002 from the second archive - ListBlobItem segment0002 = container.listBlobs("oak/data00001a.tar/0002.").iterator().next(); - ((CloudBlob) segment0002).delete(); - container.getBlockBlobReference("oak/data00001a.tar/closed").delete(); + ListBlobsOptions listOptions = new ListBlobsOptions(); + listOptions.setPrefix("oak/data00001a.tar/0002."); + BlobItem blobItem = readBlobContainerClient.listBlobs(listOptions, null).stream().iterator().next(); + readBlobContainerClient.getBlobClient(blobItem.getName()).getBlockBlobClient().delete(); + readBlobContainerClient.getBlobClient("oak/data00001a.tar/closed").delete(); fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); assertEquals("bar", segmentNodeStore.getRoot().getString("foo")); //recovered archive data00001a.tar should not contain segments 0002 and 0003 - assertFalse(container.listBlobs("oak/data00001a.tar/0002.").iterator().hasNext()); - assertFalse(container.listBlobs("oak/data00001a.tar/0003.").iterator().hasNext()); + listOptions.setPrefix("oak/data00001a.tar/0002."); + assertFalse(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); + listOptions.setPrefix("oak/data00001a.tar/0003."); + assertFalse(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); - assertTrue("Backup directory should have been created", container.listBlobs("oak/data00001a.tar.bak").iterator().hasNext()); + listOptions.setPrefix("oak/data00001a.tar.bak"); + assertTrue("Backup directory should have been created", readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); //backup has all segments but 0002 since it was deleted before recovery - assertTrue(container.listBlobs("oak/data00001a.tar.bak/0001.").iterator().hasNext()); - assertFalse(container.listBlobs("oak/data00001a.tar.bak/0002.").iterator().hasNext()); - assertTrue(container.listBlobs("oak/data00001a.tar.bak/0003.").iterator().hasNext()); + listOptions.setPrefix("oak/data00001a.tar.bak/0001."); + assertTrue(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); + listOptions.setPrefix("oak/data00001a.tar.bak/0002."); + assertFalse(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); + listOptions.setPrefix("oak/data00001a.tar.bak/0003."); + assertTrue(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); //verify content from recovered segments preserved assertEquals("bar1", segmentNodeStore.getRoot().getString("foo1")); @@ -271,7 +275,7 @@ public void testUncleanStopSegmentMissing() throws URISyntaxException, IOExcepti } @Test - public void testExists() throws IOException, 
URISyntaxException { + public void testExists() throws IOException { SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); SegmentArchiveWriter writer = manager.create("data00000a.tar"); @@ -290,7 +294,7 @@ public void testExists() throws IOException, URISyntaxException { } @Test - public void testArchiveExistsAfterFlush() throws URISyntaxException, IOException { + public void testArchiveExistsAfterFlush() throws IOException { SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); SegmentArchiveWriter writer = manager.create("data00000a.tar"); @@ -302,7 +306,7 @@ public void testArchiveExistsAfterFlush() throws URISyntaxException, IOException } @Test(expected = FileNotFoundException.class) - public void testSegmentDeletedAfterCreatingReader() throws IOException, URISyntaxException, StorageException { + public void testSegmentDeletedAfterCreatingReader() throws IOException, BlobStorageException { SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); SegmentArchiveWriter writer = manager.create("data00000a.tar"); @@ -316,8 +320,10 @@ public void testSegmentDeletedAfterCreatingReader() throws IOException, URISynta Buffer segment = reader.readSegment(u.getMostSignificantBits(), u.getLeastSignificantBits()); assertNotNull(segment); - ListBlobItem segment0000 = container.listBlobs("oak/data00000a.tar/0000.").iterator().next(); - ((CloudBlob) segment0000).delete(); + ListBlobsOptions listOptions = new ListBlobsOptions(); + listOptions.setPrefix("oak/data00000a.tar/0000."); + BlobItem segment0000 = readBlobContainerClient.listBlobs(listOptions, null).iterator().next(); + readBlobContainerClient.getBlobClient(segment0000.getName()).delete(); try { // FileNotFoundException should be thrown here @@ -329,9 +335,9 @@ public void testSegmentDeletedAfterCreatingReader() throws IOException, URISynta } @Test(expected = SegmentNotFoundException.class) - public void testMissngSegmentDetectedInFileStore() throws IOException, StorageException, URISyntaxException, InvalidFileStoreVersionException { + public void testMissingSegmentDetectedInFileStore() throws IOException, BlobStorageException, InvalidFileStoreVersionException { - AzurePersistence azurePersistence = new AzurePersistence(container.getDirectoryReference("oak")); + AzurePersistence azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); FileStore fileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(azurePersistence).build(); SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); @@ -347,16 +353,18 @@ public void testMissngSegmentDetectedInFileStore() throws IOException, StorageEx Buffer segment = reader.readSegment(u.getMostSignificantBits(), u.getLeastSignificantBits()); assertNotNull(segment); - ListBlobItem segment0000 = container.listBlobs("oak/data00000a.tar/0000.").iterator().next(); - ((CloudBlob) segment0000).delete(); + ListBlobsOptions listOptions = new ListBlobsOptions(); + listOptions.setPrefix("oak/data00000a.tar/0000."); + BlobItem segment0000 = readBlobContainerClient.listBlobs(listOptions, 
null).iterator().next(); + readBlobContainerClient.getBlobClient(segment0000.getName()).delete(); // SegmentNotFoundException should be thrown here fileStore.readSegment(new SegmentId(fileStore, u.getMostSignificantBits(), u.getLeastSignificantBits())); } @Test - public void testReadOnlyRecovery() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { - AzurePersistence rwPersistence = new AzurePersistence(container.getDirectoryReference("oak")); + public void testReadOnlyRecovery() throws InvalidFileStoreVersionException, IOException, CommitFailedException, BlobStorageException { + AzurePersistence rwPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build(); SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -364,11 +372,14 @@ public void testReadOnlyRecovery() throws URISyntaxException, InvalidFileStoreVe segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); rwFileStore.flush(); - assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); - assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); + ListBlobsOptions listOptions = new ListBlobsOptions(); + listOptions.setPrefix("oak/data00000a.tar"); + assertTrue(readBlobContainerClient.listBlobs(listOptions,null).iterator().hasNext()); + listOptions.setPrefix("oak/data00000a.tar.ro.bak"); + assertFalse(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); // create read-only FS - AzurePersistence roPersistence = new AzurePersistence(container.getDirectoryReference("oak")); + AzurePersistence roPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly(); PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() @@ -380,14 +391,16 @@ public void testReadOnlyRecovery() throws URISyntaxException, InvalidFileStoreVe roFileStore.close(); rwFileStore.close(); - assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); + listOptions.setPrefix("oak/data00000a.tar"); + assertTrue(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); // after creating a read-only FS, the recovery procedure should not be started since there is another running Oak process - assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); + listOptions.setPrefix("oak/data00000a.tar.ro.bak"); + assertFalse(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); } @Test - public void testCachingPersistenceTarRecovery() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { - AzurePersistence rwPersistence = new AzurePersistence(container.getDirectoryReference("oak")); + public void testCachingPersistenceTarRecovery() throws InvalidFileStoreVersionException, IOException, CommitFailedException, BlobStorageException { + AzurePersistence rwPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); FileStore rwFileStore 
= FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(rwPersistence).build(); SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -395,11 +408,14 @@ public void testCachingPersistenceTarRecovery() throws URISyntaxException, Inval segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); rwFileStore.flush(); - assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); - assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); + ListBlobsOptions listOptions = new ListBlobsOptions(); + listOptions.setPrefix("oak/data00000a.tar"); + assertTrue(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); + listOptions.setPrefix("oak/data00000a.tar.ro.bak"); + assertFalse(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); // create files store with split persistence - AzurePersistence azureSharedPersistence = new AzurePersistence(container.getDirectoryReference("oak")); + AzurePersistence azureSharedPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); CachingPersistence cachingPersistence = new CachingPersistence(createPersistenceCache(), azureSharedPersistence); File localFolder = folder.newFolder(); @@ -409,14 +425,16 @@ public void testCachingPersistenceTarRecovery() throws URISyntaxException, Inval // exception should not be thrown here FileStore splitPersistenceFileStore = FileStoreBuilder.fileStoreBuilder(localFolder).withCustomPersistence(splitPersistence).build(); - assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); + listOptions.setPrefix("oak/data00000a.tar"); + assertTrue(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); // after creating a read-only FS, the recovery procedure should not be started since there is another running Oak process - assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); + listOptions.setPrefix("oak/data00000a.tar.ro.bak"); + assertFalse(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); } @Test - public void testCollectBlobReferencesForReadOnlyFileStore() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { - AzurePersistence rwPersistence = new AzurePersistence(container.getDirectoryReference("oak")); + public void testCollectBlobReferencesForReadOnlyFileStore() throws InvalidFileStoreVersionException, IOException, CommitFailedException, BlobStorageException { + AzurePersistence rwPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); try (FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build()) { SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -425,10 +443,10 @@ public void testCollectBlobReferencesForReadOnlyFileStore() throws URISyntaxExce rwFileStore.flush(); // file with binary references is not created yet - assertFalse("brf file should not be present", container.getDirectoryReference("oak/data00000a.tar").getBlockBlobReference("data00000a.tar.brf").exists()); + assertFalse("brf file should not be present", 
readBlobContainerClient.getBlobClient("oak/data00000a.tar/data00000a.tar.brf").exists()); // create read-only FS, while the rw FS is still open - AzurePersistence roPersistence = new AzurePersistence(container.getDirectoryReference("oak")); + AzurePersistence roPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); try (ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly()) { PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() @@ -445,8 +463,8 @@ public void testCollectBlobReferencesForReadOnlyFileStore() throws URISyntaxExce } @Test - public void testCollectBlobReferencesDoesNotFailWhenFileIsMissing() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { - AzurePersistence rwPersistence = new AzurePersistence(container.getDirectoryReference("oak")); + public void testCollectBlobReferencesDoesNotFailWhenFileIsMissing() throws InvalidFileStoreVersionException, IOException, CommitFailedException, BlobStorageException { + AzurePersistence rwPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); try (FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build()) { SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -455,10 +473,10 @@ public void testCollectBlobReferencesDoesNotFailWhenFileIsMissing() throws URISy rwFileStore.flush(); // file with binary references is not created yet - assertFalse("brf file should not be present", container.getDirectoryReference("oak/data00000a.tar").getBlockBlobReference("data00000a.tar.brf").exists()); + assertFalse("brf file should not be present", readBlobContainerClient.getBlobClient("oak/data00000a.tar/data00000a.tar.brf").exists()); // create read-only FS, while the rw FS is still open - AzurePersistence roPersistence = new AzurePersistence(container.getDirectoryReference("oak")); + AzurePersistence roPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); try (ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly()) { PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() @@ -479,21 +497,25 @@ public void testCollectBlobReferencesDoesNotFailWhenFileIsMissing() throws URISy @Test public void testWriteAfterLosingRepoLock() throws Exception { - CloudBlobDirectory oakDirectory = container.getDirectoryReference("oak"); - AzurePersistence rwPersistence = new AzurePersistence(oakDirectory); + BlobContainerClient oakDirectory = readBlobContainerClient.getBlobClient("oak").getContainerClient(); + BlobContainerClient writeOakDirectory = writeBlobContainerClient.getBlobClient("oak").getContainerClient(); + AzurePersistence rwPersistence = new AzurePersistence(oakDirectory, writeOakDirectory, ""); + + BlockBlobClient blob = readBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlobLeaseClient leaseClient = new BlobLeaseClientBuilder().blobClient(blob).buildClient(); - CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); - CloudBlockBlob blobMocked = Mockito.spy(blob); + BlockBlobClient blobMocked = Mockito.spy(blob); + BlobLeaseClient blobLeaseMocked = 
Mockito.spy(leaseClient);
 
         Mockito
                 .doCallRealMethod()
-                .when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any());
+                .when(blobLeaseMocked).renewLease();
 
         AzurePersistence mockedRwPersistence = Mockito.spy(rwPersistence);
         WriteAccessController writeAccessController = new WriteAccessController();
-        AzureRepositoryLock azureRepositoryLock = new AzureRepositoryLock(blobMocked, () -> {}, writeAccessController);
-        AzureArchiveManager azureArchiveManager = new AzureArchiveManager(oakDirectory, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), writeAccessController);
+        AzureRepositoryLock azureRepositoryLock = new AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> {}, writeAccessController);
+        AzureArchiveManager azureArchiveManager = new AzureArchiveManager(oakDirectory, writeOakDirectory, "", new IOMonitorAdapter(), new FileStoreMonitorAdapter(), writeAccessController);
 
         Mockito
@@ -504,7 +526,7 @@ public void testWriteAfterLosingRepoLock() throws Exception {
                 .doReturn(azureArchiveManager)
                 .when(mockedRwPersistence).createArchiveManager(Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.any(), Mockito.any(), Mockito.any());
         Mockito
-                .doReturn(new AzureJournalFile(oakDirectory, "journal.log", writeAccessController))
+                .doReturn(new AzureJournalFile(oakDirectory, writeOakDirectory, "journal.log", writeAccessController))
                 .when(mockedRwPersistence).getJournalFile();
 
         FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(mockedRwPersistence).build();
@@ -515,10 +537,10 @@ public void testWriteAfterLosingRepoLock() throws Exception {
 
         // simulate operation timeout when trying to renew lease
         Mockito.reset(blobMocked);
 
-        StorageException storageException =
-                new StorageException(StorageErrorCodeStrings.OPERATION_TIMED_OUT, "operation timeout", new TimeoutException());
+        BlobStorageException storageException =
+                new BlobStorageException("operation timeout", null, new TimeoutException());
 
-        Mockito.doThrow(storageException).when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any());
+        Mockito.doThrow(storageException).when(blobLeaseMocked).renewLease();
 
 
         // wait till lease expires
@@ -539,7 +562,7 @@ public void testWriteAfterLosingRepoLock() throws Exception {
         Thread.sleep(2000);
 
         // It should be possible to start another RW file store.
- FileStore rwFileStore2 = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(new AzurePersistence(oakDirectory)).build(); + FileStore rwFileStore2 = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(new AzurePersistence(oakDirectory, writeOakDirectory, "")).build(); SegmentNodeStore segmentNodeStore2 = SegmentNodeStoreBuilders.builder(rwFileStore2).build(); NodeBuilder builder2 = segmentNodeStore2.getRoot().builder(); diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalTest.java index f83d2e7f6f6..d6cc40a0221 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalTest.java @@ -18,10 +18,8 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; import org.apache.jackrabbit.oak.segment.file.GcJournalTest; import org.junit.Before; @@ -29,24 +27,24 @@ import org.junit.Ignore; import org.junit.Test; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; public class AzureGCJournalTest extends GcJournalTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient readBlobContainerClient; + private BlobContainerClient writeBlobContainerClient; @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); + public void setup() throws BlobStorageException { + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); } @Override protected SegmentNodeStorePersistence getPersistence() throws Exception { - return new AzurePersistence(container.getDirectoryReference("oak")); + return new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); } @Test diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingTestingPolicy.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingTestingPolicy.java new file mode 100644 index 00000000000..25dba3f3bb1 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingTestingPolicy.java @@ -0,0 +1,31 @@ +package org.apache.jackrabbit.oak.segment.azure; + +import com.azure.core.http.HttpPipelineCallContext; +import com.azure.core.http.HttpPipelineNextPolicy; +import com.azure.core.http.HttpResponse; +import com.azure.core.http.policy.HttpPipelinePolicy; +import org.apache.jackrabbit.guava.common.base.Stopwatch; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.util.concurrent.TimeUnit; + +public class AzureHttpRequestLoggingTestingPolicy implements HttpPipelinePolicy { + + private static final Logger log = 
LoggerFactory.getLogger(AzureHttpRequestLoggingTestingPolicy.class);
+
+    @Override
+    public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
+        Stopwatch stopwatch = Stopwatch.createStarted();
+
+        log.info("HTTP Request: {} {}", context.getHttpRequest().getHttpMethod(), context.getHttpRequest().getUrl());
+
+        return next.process().flatMap(httpResponse -> {
+            log.info("Status code is: {}", httpResponse.getStatusCode());
+            log.info("Response time: {}ms", (stopwatch.elapsed(TimeUnit.NANOSECONDS)) / 1_000_000);
+
+            return Mono.just(httpResponse);
+        });
+    }
+}
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileTest.java
index f5da9f15e6a..b54c5fdfeaa 100644
--- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileTest.java
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileTest.java
@@ -16,13 +16,14 @@
  */
 package org.apache.jackrabbit.oak.segment.azure;
 
-import com.microsoft.azure.storage.StorageException;
-import com.microsoft.azure.storage.blob.CloudAppendBlob;
-import com.microsoft.azure.storage.blob.CloudBlobContainer;
-import com.microsoft.azure.storage.blob.ListBlobItem;
+import java.util.stream.Collectors;
 import java.util.stream.IntStream;
+
+import com.azure.storage.blob.BlobContainerClient;
+import com.azure.storage.blob.models.BlobItem;
+import com.azure.storage.blob.models.BlobStorageException;
+import com.azure.storage.blob.models.ListBlobsOptions;
 import org.apache.commons.lang3.time.StopWatch;
-import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule;
 import org.apache.jackrabbit.oak.segment.remote.WriteAccessController;
 import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileReader;
 import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileWriter;
@@ -32,8 +33,6 @@
 import org.junit.Test;
 
 import java.io.IOException;
-import java.net.URISyntaxException;
-import java.security.InvalidKeyException;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -48,20 +47,23 @@ public class AzureJournalFileTest {
     @ClassRule
     public static AzuriteDockerRule azurite = new AzuriteDockerRule();
 
-    private CloudBlobContainer container;
+    private BlobContainerClient readBlobContainerClient;
+
+    private BlobContainerClient writeBlobContainerClient;
 
     private AzureJournalFile journal;
 
     @Before
-    public void setup() throws StorageException, InvalidKeyException, URISyntaxException {
-        container = azurite.getContainer("oak-test");
+    public void setup() throws BlobStorageException {
        readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test");
        writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test");
         WriteAccessController writeAccessController = new WriteAccessController();
         writeAccessController.enableWriting();
-        journal = new AzureJournalFile(container.getDirectoryReference("journal"), "journal.log", writeAccessController, 50);
+        journal = new AzureJournalFile(readBlobContainerClient, writeBlobContainerClient, "journal.log", writeAccessController, 50);
     }
 
     @Test
-    public void testSplitJournalFiles() throws IOException, URISyntaxException, StorageException {
+    public void testSplitJournalFiles() throws IOException {
         assertFalse(journal.exists());
 
         int index = 0;
@@ -81,13 +83,11 @@ public void testSplitJournalFiles() throws IOException, URISyntaxException, Stor
assertJournalEntriesCount(index);
     }
 
-    private int countJournalBlobs() throws URISyntaxException, StorageException {
-        List<CloudAppendBlob> result = new ArrayList<>();
-        for (ListBlobItem b : container.getDirectoryReference("journal").listBlobs("journal.log")) {
-            if (b instanceof CloudAppendBlob) {
-                result.add((CloudAppendBlob) b);
-            }
-        }
+    private int countJournalBlobs() {
+        ListBlobsOptions listBlobsOptions = new ListBlobsOptions();
+        listBlobsOptions.setPrefix("journal.log");
+
+        List<BlobItem> result = readBlobContainerClient.listBlobs(listBlobsOptions, null).stream().collect(Collectors.toList());
         return result.size();
     }
 
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFileTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFileTest.java
index 2b2b0d35ce4..24bd3f1dd99 100644
--- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFileTest.java
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFileTest.java
@@ -18,10 +18,8 @@
  */
 package org.apache.jackrabbit.oak.segment.azure;
 
-import com.microsoft.azure.storage.StorageException;
-import com.microsoft.azure.storage.blob.CloudBlobContainer;
-
-import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule;
+import com.azure.storage.blob.BlobContainerClient;
+import com.azure.storage.blob.models.BlobStorageException;
 import org.apache.jackrabbit.oak.segment.spi.persistence.ManifestFile;
 import org.junit.Before;
 import org.junit.ClassRule;
@@ -40,16 +38,18 @@ public class AzureManifestFileTest {
     @ClassRule
     public static AzuriteDockerRule azurite = new AzuriteDockerRule();
 
-    private CloudBlobContainer container;
+    private BlobContainerClient readBlobContainerClient;
+    private BlobContainerClient writeBlobContainerClient;
 
     @Before
-    public void setup() throws StorageException, InvalidKeyException, URISyntaxException {
-        container = azurite.getContainer("oak-test");
+    public void setup() throws BlobStorageException, InvalidKeyException, URISyntaxException {
+        readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test");
+        writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test");
     }
 
     @Test
-    public void testManifest() throws URISyntaxException, IOException {
-        ManifestFile manifestFile = new AzurePersistence(container.getDirectoryReference("oak")).getManifestFile();
+    public void testManifest() throws IOException {
+        ManifestFile manifestFile = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak").getManifestFile();
         assertFalse(manifestFile.exists());
 
         Properties props = new Properties();
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureReadSegmentTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureReadSegmentTest.java
index 63a2da26881..ba91e852556 100644
--- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureReadSegmentTest.java
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureReadSegmentTest.java
@@ -18,11 +18,8 @@
  */
 package org.apache.jackrabbit.oak.segment.azure;
 
-import com.microsoft.azure.storage.StorageException;
-import com.microsoft.azure.storage.blob.CloudBlobContainer;
-import com.microsoft.azure.storage.blob.CloudBlobDirectory;
-
-import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule;
+import com.azure.storage.blob.BlobContainerClient;
+import
com.azure.storage.blob.models.BlobStorageException; import org.apache.jackrabbit.oak.commons.Buffer; import org.apache.jackrabbit.oak.segment.SegmentId; import org.apache.jackrabbit.oak.segment.SegmentNotFoundException; @@ -50,16 +47,18 @@ public class AzureReadSegmentTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient readBlobContainerClient; + private BlobContainerClient writeBlobContainerClient; @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); + public void setup() throws BlobStorageException, InvalidKeyException, URISyntaxException { + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); } @Test(expected = SegmentNotFoundException.class) - public void testReadNonExistentSegmentRepositoryReachable() throws URISyntaxException, IOException, InvalidFileStoreVersionException, StorageException { - AzurePersistence p = new AzurePersistence(container.getDirectoryReference("oak")); + public void testReadNonExistentSegmentRepositoryReachable() throws IOException, InvalidFileStoreVersionException, BlobStorageException { + AzurePersistence p = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); SegmentId id = new SegmentId(fs, 0, 0); @@ -71,8 +70,8 @@ public void testReadNonExistentSegmentRepositoryReachable() throws URISyntaxExce } @Test(expected = RepositoryNotReachableException.class) - public void testReadExistentSegmentRepositoryNotReachable() throws URISyntaxException, IOException, InvalidFileStoreVersionException, StorageException { - AzurePersistence p = new ReadFailingAzurePersistence(container.getDirectoryReference("oak")); + public void testReadExistentSegmentRepositoryNotReachable() throws IOException, InvalidFileStoreVersionException, BlobStorageException { + AzurePersistence p = new ReadFailingAzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); SegmentId id = new SegmentId(fs, 0, 0); @@ -87,18 +86,17 @@ public void testReadExistentSegmentRepositoryNotReachable() throws URISyntaxExce } static class ReadFailingAzurePersistence extends AzurePersistence { - public ReadFailingAzurePersistence(CloudBlobDirectory segmentStoreDirectory) { - super(segmentStoreDirectory); + public ReadFailingAzurePersistence(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, String rootPrefix) { + super(readBlobContainerClient, writeBlobContainerClient, rootPrefix); } @Override public SegmentArchiveManager createArchiveManager(boolean mmap, boolean offHeapAccess, IOMonitor ioMonitor, - FileStoreMonitor fileStoreMonitor, RemoteStoreMonitor remoteStoreMonitor) { - return new AzureArchiveManager(segmentstoreDirectory, ioMonitor, fileStoreMonitor, writeAccessController) { + FileStoreMonitor fileStoreMonitor, RemoteStoreMonitor remoteStoreMonitor) { + return new AzureArchiveManager(readBlobContainerClient, writeBlobContainerClient, rootPrefix,ioMonitor, fileStoreMonitor, writeAccessController) { @Override public SegmentArchiveReader open(String archiveName) throws IOException { - CloudBlobDirectory archiveDirectory = 
getDirectory(archiveName); - return new AzureSegmentArchiveReader(archiveDirectory, ioMonitor) { + return new AzureSegmentArchiveReader(readBlobContainerClient, rootPrefix, archiveName, ioMonitor) { @Override public Buffer readSegment(long msb, long lsb) throws IOException { throw new RepositoryNotReachableException( @@ -109,8 +107,7 @@ public Buffer readSegment(long msb, long lsb) throws IOException { @Override public SegmentArchiveWriter create(String archiveName) throws IOException { - CloudBlobDirectory archiveDirectory = getDirectory(archiveName); - return new AzureSegmentArchiveWriter(archiveDirectory, ioMonitor, fileStoreMonitor, writeAccessController) { + return new AzureSegmentArchiveWriter(writeBlobContainerClient, rootPrefix, archiveName, ioMonitor, fileStoreMonitor, writeAccessController) { @Override public Buffer readSegment(long msb, long lsb) throws IOException { throw new RepositoryNotReachableException( diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLockTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLockTest.java index 03c878c7515..619621af54d 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLockTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLockTest.java @@ -18,12 +18,15 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageErrorCodeStrings; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.CloudBlockBlob; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import com.azure.core.http.HttpHeaderName; +import com.azure.core.http.HttpHeaders; +import com.azure.core.http.RequestConditions; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobErrorCode; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.specialized.BlobLeaseClient; +import com.azure.storage.blob.specialized.BlobLeaseClientBuilder; +import com.azure.storage.blob.specialized.BlockBlobClient; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.persistence.RepositoryLock; import org.junit.Before; @@ -42,6 +45,7 @@ import java.util.concurrent.TimeoutException; import static org.junit.Assert.*; +import static org.mockito.ArgumentMatchers.*; public class AzureRepositoryLockTest { @@ -53,11 +57,11 @@ public class AzureRepositoryLockTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient container; @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); + public void setup() throws BlobStorageException, InvalidKeyException, URISyntaxException { + container = azurite.getReadBlobContainerClient("oak-test"); } @Rule @@ -66,11 +70,12 @@ public void setup() throws StorageException, InvalidKeyException, URISyntaxExcep .and(AzureRepositoryLock.TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, TIME_TO_WAIT_BEFORE_BLOCK); @Test - public void testFailingLock() throws URISyntaxException, IOException, StorageException { - CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); - new AzureRepositoryLock(blob, () 
-> {}, new WriteAccessController()).lock(); + public void testFailingLock() throws IOException, BlobStorageException { + BlockBlobClient blockBlobClient = container.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(blockBlobClient).buildClient(); + new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> {}, new WriteAccessController()).lock(); try { - new AzureRepositoryLock(blob, () -> {}, new WriteAccessController()).lock(); + new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> {}, new WriteAccessController()).lock(); fail("The second lock should fail."); } catch (IOException e) { // it's fine @@ -78,12 +83,13 @@ public void testFailingLock() throws URISyntaxException, IOException, StorageExc } @Test - public void testWaitingLock() throws URISyntaxException, IOException, StorageException, InterruptedException { - CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); + public void testWaitingLock() throws BlobStorageException, InterruptedException, IOException { + BlockBlobClient blockBlobClient = container.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(blockBlobClient).buildClient(); Semaphore s = new Semaphore(0); new Thread(() -> { try { - RepositoryLock lock = new AzureRepositoryLock(blob, () -> {}, new WriteAccessController()).lock(); + RepositoryLock lock = new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> {}, new WriteAccessController()).lock(); s.release(); Thread.sleep(1000); lock.unlock(); @@ -93,33 +99,35 @@ public void testWaitingLock() throws URISyntaxException, IOException, StorageExc }).start(); s.acquire(); - new AzureRepositoryLock(blob, () -> {}, new WriteAccessController(), 10).lock(); + new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> {}, new WriteAccessController(), 10).lock(); } @Test - public void testLeaseRefreshUnsuccessful() throws URISyntaxException, StorageException, IOException, InterruptedException { - CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); + public void testLeaseRefreshUnsuccessful() throws BlobStorageException, IOException, InterruptedException { + BlockBlobClient blockBlobClient = container.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(blockBlobClient).buildClient(); - CloudBlockBlob blobMocked = Mockito.spy(blob); + BlockBlobClient blobMocked = Mockito.spy(blockBlobClient); + BlobLeaseClient blobLeaseMocked = Mockito.spy(blobLeaseClient); // instrument the mock to throw the exception twice when renewing the lease - StorageException storageException = - new StorageException(StorageErrorCodeStrings.OPERATION_TIMED_OUT, "operation timeout", new TimeoutException()); + BlobStorageException storageException = + new BlobStorageException("operation timeout", null, new TimeoutException()); Mockito.doThrow(storageException) .doThrow(storageException) .doCallRealMethod() - .when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + .when(blobLeaseMocked).renewLeaseWithResponse((RequestConditions) any(), any(), any()); - new AzureRepositoryLock(blobMocked, () -> {}, new WriteAccessController()).lock(); + new AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> {}, new WriteAccessController()).lock(); // wait till lease expires Thread.sleep(16000); // reset the mock to default behaviour - 
Mockito.doCallRealMethod().when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + Mockito.doCallRealMethod().when(blobLeaseMocked).renewLeaseWithResponse((RequestConditions) any(), any(), any()); try { - new AzureRepositoryLock(blobMocked, () -> {}, new WriteAccessController()).lock(); + new AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> {}, new WriteAccessController()).lock(); fail("The second lock should fail."); } catch (IOException e) { // it's fine @@ -129,22 +137,31 @@ public void testLeaseRefreshUnsuccessful() throws URISyntaxException, StorageExc @Test public void testWritesBlockedOnlyAfterFewUnsuccessfulAttempts() throws Exception { - CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); + BlockBlobClient blockBlobClient = container.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(blockBlobClient).buildClient(); - CloudBlockBlob blobMocked = Mockito.spy(blob); + BlockBlobClient blobMocked = Mockito.spy(blockBlobClient); + BlobLeaseClient blobLeaseMocked = Mockito.spy(blobLeaseClient); // instrument the mock to throw the exception twice when renewing the lease - StorageException storageException = - new StorageException(StorageErrorCodeStrings.OPERATION_TIMED_OUT, "operation timeout", new TimeoutException()); + HttpHeaders headers = new HttpHeaders(); + headers.add(HttpHeaderName.fromString("x-ms-error-code"), BlobErrorCode.OPERATION_TIMED_OUT.toString()); + + MockAzureHttpResponse mockAzureHttpResponse = new MockAzureHttpResponse(306, "operation timeout"); + mockAzureHttpResponse.setHeaders(headers); + + BlobStorageException storageException = + new BlobStorageException("operation timeout", mockAzureHttpResponse, new TimeoutException()); + Mockito .doCallRealMethod() .doThrow(storageException) - .when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + .when(blobLeaseMocked).renewLeaseWithResponse((RequestConditions) isNull(), Mockito.any(), Mockito.any()); WriteAccessController writeAccessController = new WriteAccessController(); - new AzureRepositoryLock(blobMocked, () -> {}, writeAccessController).lock(); + new AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> {}, writeAccessController).lock(); Thread thread = new Thread(() -> { @@ -166,6 +183,6 @@ public void testWritesBlockedOnlyAfterFewUnsuccessfulAttempts() throws Exception Thread.sleep(5000); assertTrue("after more than 9 seconds thread should be in a waiting state", thread.getState().equals(Thread.State.WAITING)); - Mockito.doCallRealMethod().when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + Mockito.doCallRealMethod().when(blobLeaseMocked).renewLeaseWithResponse((RequestConditions) any(), any(), any()); } } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java index ff45bf67cd7..f7114c36538 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java @@ -18,8 +18,7 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.azure.storage.blob.BlobContainerClient; import 
org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; @@ -28,6 +27,7 @@ import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; import org.jetbrains.annotations.NotNull; import org.junit.Before; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.mockserver.client.MockServerClient; @@ -35,11 +35,8 @@ import org.mockserver.matchers.Times; import org.mockserver.model.BinaryBody; import org.mockserver.model.HttpRequest; -import shaded_package.org.apache.http.client.utils.URIBuilder; import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; import java.util.UUID; import static org.junit.Assert.assertThrows; @@ -57,18 +54,23 @@ public class AzureSegmentArchiveWriterTest { @SuppressWarnings("unused") private MockServerClient mockServerClient; - private CloudBlobContainer container; + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private BlobContainerClient readBlobContainerClient; + private BlobContainerClient writeBlobContainerClient; @Before public void setUp() throws Exception { - container = createCloudBlobContainer(); - System.setProperty("azure.segment.archive.writer.retries.intervalMs", "100"); System.setProperty("azure.segment.archive.writer.retries.max", Integer.toString(MAX_ATTEMPTS)); // Disable Azure SDK own retry mechanism used by AzureSegmentArchiveWriter - System.setProperty("segment.azure.retry.attempts", "0"); + System.setProperty("segment.azure.retry.attempts", "1"); System.setProperty("segment.timeout.execution", "1"); + + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); } @Test @@ -167,10 +169,10 @@ private void expectWriteRequests() { } @NotNull - private SegmentArchiveWriter createSegmentArchiveWriter() throws URISyntaxException, IOException { + private SegmentArchiveWriter createSegmentArchiveWriter() throws IOException { WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); - AzurePersistence azurePersistence = new AzurePersistence(container.getDirectoryReference("oak"));/**/ + AzurePersistence azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak");/**/ azurePersistence.setWriteAccessController(writeAccessController); SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); SegmentArchiveWriter writer = manager.create("data00000a.tar"); @@ -186,7 +188,7 @@ private static HttpRequest getCloseArchiveRequest() { private static HttpRequest getWriteBinaryReferencesRequest() { return request() .withMethod("PUT") - .withPath(BASE_PATH + "/oak/data00000a.tar/data00000a.tar.brf"); + .withPath(BASE_PATH + "/oak%2Fdata00000a.tar%2Fdata00000a.tar.brf"); } private static HttpRequest getWriteGraphRequest() { @@ -209,15 +211,4 @@ private static HttpRequest getUploadSegmentDataRequest() { .withBody(new BinaryBody(new byte[10])); } - @NotNull - private CloudBlobContainer createCloudBlobContainer() throws URISyntaxException, StorageException { - URI uri = new URIBuilder() - .setScheme("http") - .setHost(mockServerClient.remoteAddress().getHostName()) - 
.setPort(mockServerClient.remoteAddress().getPort())
-                .setPath(BASE_PATH)
-                .build();
-
-        return new CloudBlobContainer(uri);
-    }
 }
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreServiceTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreServiceTest.java
index 4da1f99779d..d2b55b4d678 100644
--- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreServiceTest.java
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreServiceTest.java
@@ -16,45 +16,30 @@
  */
 package org.apache.jackrabbit.oak.segment.azure;
 
+import com.azure.storage.blob.BlobContainerClient;
+import com.azure.storage.blob.models.BlobItem;
+import com.azure.storage.blob.models.BlobStorageException;
+import com.azure.storage.blob.sas.BlobSasPermission;
+import com.azure.storage.blob.sas.BlobServiceSasSignatureValues;
 import org.apache.jackrabbit.guava.common.collect.ImmutableSet;
-import com.microsoft.azure.storage.StorageException;
-import com.microsoft.azure.storage.blob.*;
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.time.Duration;
-import java.time.Instant;
-import java.util.Date;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.Set;
-import java.util.stream.StreamSupport;
-
-import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule;
 import org.apache.jackrabbit.oak.segment.azure.util.Environment;
 import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence;
 import org.apache.sling.testing.mock.osgi.junit.OsgiContext;
 import org.jetbrains.annotations.NotNull;
-import org.junit.Before;
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
+import org.junit.*;
 import org.osgi.util.converter.Converters;
 
-import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_ACCOUNT_NAME;
-import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_ID;
-import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_SECRET;
-import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_TENANT_ID;
+import java.io.ByteArrayInputStream;
+import java.time.Duration;
+import java.time.Instant;
+import java.time.OffsetDateTime;
+import java.util.HashMap;
+import java.util.Set;
+import java.util.stream.StreamSupport;
 
-import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.ADD;
-import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.CREATE;
-import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.LIST;
-import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.READ;
-import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.WRITE;
 import static java.util.stream.Collectors.toSet;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.fail;
+import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.*;
+import static org.junit.Assert.*;
 import static org.junit.Assume.assumeNotNull;
 
 public class AzureSegmentStoreServiceTest {
@@ -66,23 +51,39 @@ public class AzureSegmentStoreServiceTest {
     @Rule
     public final OsgiContext context = new OsgiContext();
 
-    private static final EnumSet<SharedAccessBlobPermissions> READ_ONLY = EnumSet.of(READ, LIST);
-    private static final EnumSet<SharedAccessBlobPermissions> READ_WRITE =
EnumSet.of(READ, LIST, CREATE, WRITE, ADD);
+    private static BlobSasPermission READ_ONLY;
+    private static BlobSasPermission READ_WRITE;
     private static final ImmutableSet<String> BLOBS = ImmutableSet.of("blob1", "blob2");
 
-    private CloudBlobContainer container;
-
+    private BlobContainerClient container;
+
+    @BeforeClass
+    public static void setupTest() {
+        READ_ONLY = new BlobSasPermission();
+        READ_ONLY.setReadPermission(true);
+        READ_ONLY.setListPermission(true);
+
+        READ_WRITE = new BlobSasPermission();
+        READ_WRITE.setReadPermission(true);
+        READ_WRITE.setListPermission(true);
+        READ_WRITE.setCreatePermission(true);
+        READ_WRITE.setWritePermission(true);
+        READ_WRITE.setAddPermission(true);
+        System.setProperty("segment.azure.v12.enabled", "true");
+
+    }
+
     @Before
     public void setup() throws Exception {
-        container = azurite.getContainer(AzureSegmentStoreService.DEFAULT_CONTAINER_NAME);
+        container = azurite.getReadBlobContainerClient(AzureSegmentStoreService.DEFAULT_CONTAINER_NAME);
         for (String blob : BLOBS) {
-            container.getBlockBlobReference(blob + ".txt").uploadText(blob);
+            container.getBlobClient(blob + ".txt").getBlockBlobClient().upload(new ByteArrayInputStream(blob.getBytes()), blob.length());
         }
     }
 
     @Test
     public void connectWithSharedAccessSignatureURL_readOnly() throws Exception {
-        String sasToken = container.generateSharedAccessSignature(policy(READ_ONLY), null);
+        String sasToken = container.generateSas(policy(READ_ONLY), null);
 
         AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService();
         azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithSharedAccessSignature(sasToken));
@@ -95,7 +96,7 @@ public void connectWithSharedAccessSignatureURL_readOnly() throws Exception {
 
     @Test
     public void connectWithSharedAccessSignatureURL_readWrite() throws Exception {
-        String sasToken = container.generateSharedAccessSignature(policy(READ_WRITE), null);
+        String sasToken = container.generateSas(policy(READ_WRITE), null);
 
         AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService();
         azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithSharedAccessSignature(sasToken));
@@ -108,8 +109,7 @@ public void connectWithSharedAccessSignatureURL_readWrite() throws Exception {
 
     @Test
     public void connectWithSharedAccessSignatureURL_expired() throws Exception {
-        SharedAccessBlobPolicy expiredPolicy = policy(READ_WRITE, yesterday());
-        String sasToken = container.generateSharedAccessSignature(expiredPolicy, null);
+        String sasToken = container.generateSas(policy(READ_WRITE, -1), null);
 
         AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService();
         azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithSharedAccessSignature(sasToken));
@@ -176,48 +176,45 @@ public void deactivate() throws Exception {
     }
 
     @NotNull
-    private static SharedAccessBlobPolicy policy(EnumSet<SharedAccessBlobPermissions> permissions, Instant expirationTime) {
-        SharedAccessBlobPolicy sharedAccessBlobPolicy = new SharedAccessBlobPolicy();
-        sharedAccessBlobPolicy.setPermissions(permissions);
-        sharedAccessBlobPolicy.setSharedAccessExpiryTime(Date.from(expirationTime));
-        return sharedAccessBlobPolicy;
+    private static BlobServiceSasSignatureValues policy(BlobSasPermission permissions, long days) {
+        return new BlobServiceSasSignatureValues(OffsetDateTime.now().plusDays(days), permissions);
     }
 
     @NotNull
-    private static SharedAccessBlobPolicy policy(EnumSet<SharedAccessBlobPermissions> permissions) {
-        return policy(permissions,
Instant.now().plus(Duration.ofDays(7));
+    private static BlobServiceSasSignatureValues policy(BlobSasPermission permissions) {
+        return policy(permissions, 7);
     }
 
     private static void assertReadAccessGranted(SegmentNodeStorePersistence persistence, Set<String> expectedBlobs) throws Exception {
-        CloudBlobContainer container = getContainerFrom(persistence);
+        BlobContainerClient container = getContainerFrom(persistence);
         Set<String> actualBlobNames = StreamSupport.stream(container.listBlobs().spliterator(), false)
-            .map(blob -> blob.getUri().getPath())
-            .map(path -> path.substring(path.lastIndexOf('/') + 1))
-            .filter(name -> name.equals("test.txt") || name.startsWith("blob"))
-            .collect(toSet());
+                .map(BlobItem::getName)
+                .map(path -> path.substring(path.lastIndexOf('/') + 1))
+                .filter(name -> name.equals("test.txt") || name.startsWith("blob"))
+                .collect(toSet());
 
         Set<String> expectedBlobNames = expectedBlobs.stream().map(name -> name + ".txt").collect(toSet());
 
         assertEquals(expectedBlobNames, actualBlobNames);
 
         Set<String> actualBlobContent = actualBlobNames.stream()
-            .map(name -> {
-                try {
-                    return container.getBlockBlobReference(name).downloadText();
-                } catch (StorageException | IOException | URISyntaxException e) {
-                    throw new RuntimeException("Error while reading blob " + name, e);
-                }
-            })
-            .collect(toSet());
+                .map(name -> {
+                    try {
+                        return container.getBlobClient(name).downloadContent().toString();
+                    } catch (BlobStorageException e) {
+                        throw new RuntimeException("Error while reading blob " + name, e);
+                    }
+                })
+                .collect(toSet());
         assertEquals(expectedBlobs, actualBlobContent);
     }
 
     private static void assertWriteAccessGranted(SegmentNodeStorePersistence persistence) throws Exception {
         getContainerFrom(persistence)
-            .getBlockBlobReference("test.txt").uploadText("test");
+                .getBlobClient("test.txt").upload(new ByteArrayInputStream("test".getBytes()));
     }
 
-    private static CloudBlobContainer getContainerFrom(SegmentNodeStorePersistence persistence) throws Exception {
-        return ((AzurePersistence) persistence).getSegmentstoreDirectory().getContainer();
+    private static BlobContainerClient getContainerFrom(SegmentNodeStorePersistence persistence) throws Exception {
+        return ((AzurePersistence) persistence).getReadBlobContainerClient();
     }
 
     private static void assertWriteAccessNotGranted(SegmentNodeStorePersistence persistence) {
@@ -241,7 +238,7 @@ private static void assertReadAccessNotGranted(SegmentNodeStorePersistence persi
     private static Instant yesterday() {
         return Instant.now().minus(Duration.ofDays(1));
     }
-
+
     private static ImmutableSet<String> concat(ImmutableSet<String> blobs, String element) {
         return ImmutableSet.<String>builder().addAll(blobs).add(element).build();
     }
@@ -256,9 +253,9 @@ private static Configuration getConfigurationWithAccessKey(String accessKey) {
 
     private static Configuration getConfigurationWithConfigurationURL(String accessKey) {
         String connectionString = "DefaultEndpointsProtocol=https;"
-            + "BlobEndpoint=" + azurite.getBlobEndpoint() + ';'
-            + "AccountName=" + AzuriteDockerRule.ACCOUNT_NAME + ';'
-            + "AccountKey=" + accessKey + ';';
+                + "BlobEndpoint=" + azurite.getBlobEndpoint() + ';'
+                + "AccountName=" + AzuriteDockerRule.ACCOUNT_NAME + ';'
+                + "AccountKey=" + accessKey + ';';
         return getConfiguration(null, AzuriteDockerRule.ACCOUNT_NAME, null, connectionString, null, null, null);
     }
 
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFileTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFileTest.java
index
009ec430a71..31690e03e05 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFileTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFileTest.java @@ -16,14 +16,12 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; +import org.apache.jackrabbit.oak.segment.file.tar.TarFileTest; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; -import org.apache.jackrabbit.oak.segment.file.tar.TarFileTest; import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; import org.junit.Before; import org.junit.ClassRule; @@ -31,27 +29,28 @@ import org.junit.Test; import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; public class AzureTarFileTest extends TarFileTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient readBlobContainerClient; + + private BlobContainerClient writeBlobContainerClient; @Before @Override public void setUp() throws IOException { try { - container = azurite.getContainer("oak-test"); - AzurePersistence azurePersistence = new AzurePersistence(container.getDirectoryReference("oak")); + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + AzurePersistence azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); azurePersistence.setWriteAccessController(writeAccessController); archiveManager = azurePersistence.createArchiveManager(true, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); - } catch (StorageException | InvalidKeyException | URISyntaxException e) { + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFilesTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFilesTest.java index 3be1367531d..fbd872c359d 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFilesTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFilesTest.java @@ -16,14 +16,12 @@ */ package org.apache.jackrabbit.oak.segment.azure; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import com.azure.storage.blob.BlobContainerClient; +import org.apache.jackrabbit.oak.segment.file.tar.TarFiles; +import org.apache.jackrabbit.oak.segment.file.tar.TarFilesTest; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; 
-import org.apache.jackrabbit.oak.segment.file.tar.TarFiles;
-import org.apache.jackrabbit.oak.segment.file.tar.TarFilesTest;
 import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter;
 import org.junit.Before;
 import org.junit.ClassRule;
@@ -33,16 +31,18 @@ public class AzureTarFilesTest extends TarFilesTest {
     @ClassRule
     public static AzuriteDockerRule azurite = new AzuriteDockerRule();

-    private CloudBlobContainer container;
+    private BlobContainerClient readBlobContainerClient;
+    private BlobContainerClient writeBlobContainerClient;

     @Before
     @Override
     public void setUp() throws Exception {
-        container = azurite.getContainer("oak-test");
-        AzurePersistence azurePersistence = new AzurePersistence(container.getDirectoryReference("oak"));
+        readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test");
+        writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test");
+        AzurePersistence azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak");
         WriteAccessController writeAccessController = new WriteAccessController();
         writeAccessController.enableWriting();
         azurePersistence.setWriteAccessController(writeAccessController);
         tarFiles = TarFiles.builder()
                 .withDirectory(folder.newFolder())
                 .withTarRecovery((id, data, recovery) -> {
@@ -52,7 +52,7 @@ public void setUp() throws Exception {
                 .withFileStoreMonitor(new FileStoreMonitorAdapter())
                 .withRemoteStoreMonitor(new RemoteStoreMonitorAdapter())
                 .withMaxFileSize(MAX_FILE_SIZE)
                 .withPersistence(azurePersistence)
                 .build();
     }
 }
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarWriterTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarWriterTest.java
index c27e3a703a3..a3ee015f87b 100644
--- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarWriterTest.java
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarWriterTest.java
@@ -16,12 +16,10 @@
  */
 package org.apache.jackrabbit.oak.segment.azure;

-import com.microsoft.azure.storage.blob.CloudBlobContainer;
-
-import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule;
+import com.azure.storage.blob.BlobContainerClient;
+import org.apache.jackrabbit.oak.segment.file.tar.TarWriterTest;
 import org.apache.jackrabbit.oak.segment.remote.WriteAccessController;
 import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter;
-import org.apache.jackrabbit.oak.segment.file.tar.TarWriterTest;
 import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager;
 import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter;
 import org.jetbrains.annotations.NotNull;
@@ -35,11 +33,13 @@ public class AzureTarWriterTest extends TarWriterTest {
     @ClassRule
     public static AzuriteDockerRule azurite = new AzuriteDockerRule();

-    private CloudBlobContainer container;
+    private BlobContainerClient readBlobContainerClient;
+    private BlobContainerClient writeBlobContainerClient;

     @Before
     public void setUp() throws Exception {
-        container = azurite.getContainer("oak-test");
+        readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test");
+        writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test");
     }

     @NotNull
     @Override
     protected SegmentArchiveManager
getSegmentArchiveManager() throws Exception { WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); - AzureArchiveManager azureArchiveManager = new AzureArchiveManager(container.getDirectoryReference("oak"), new IOMonitorAdapter(), monitor, writeAccessController); + AzureArchiveManager azureArchiveManager = new AzureArchiveManager(readBlobContainerClient, writeBlobContainerClient, "oak", new IOMonitorAdapter(), monitor, writeAccessController); return azureArchiveManager; } @@ -56,10 +56,10 @@ protected SegmentArchiveManager getSegmentArchiveManager() throws Exception { protected SegmentArchiveManager getFailingSegmentArchiveManager() throws Exception { final WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); - return new AzureArchiveManager(container.getDirectoryReference("oak"), new IOMonitorAdapter(), monitor, writeAccessController) { + return new AzureArchiveManager(readBlobContainerClient, writeBlobContainerClient, "oak", new IOMonitorAdapter(), monitor, writeAccessController) { @Override public SegmentArchiveWriter create(String archiveName) throws IOException { - return new AzureSegmentArchiveWriter(getDirectory(archiveName), ioMonitor, monitor, writeAccessController) { + return new AzureSegmentArchiveWriter(writeBlobContainerClient, "oak", archiveName, ioMonitor, monitor, writeAccessController) { @Override public void writeGraph(@NotNull byte[] data) throws IOException { throw new IOException("test"); diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java new file mode 100644 index 00000000000..c00de44aba4 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.jackrabbit.oak.segment.azure;
+
+import com.azure.storage.blob.BlobContainerClient;
+import com.azure.storage.blob.BlobServiceClient;
+import com.azure.storage.blob.BlobServiceClientBuilder;
+import com.azure.storage.blob.models.BlobStorageException;
+import com.azure.storage.common.policy.RequestRetryOptions;
+import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptions;
+import org.junit.Assume;
+import org.junit.rules.ExternalResource;
+import org.junit.runner.Description;
+import org.junit.runners.model.MultipleFailureException;
+import org.junit.runners.model.Statement;
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.utility.DockerImageName;
+
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+
+public class AzuriteDockerRule extends ExternalResource {
+
+    private static final DockerImageName DOCKER_IMAGE_NAME = DockerImageName.parse("mcr.microsoft.com/azure-storage/azurite:3.31.0");
+    public static final String ACCOUNT_KEY = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==";
+    public static final String ACCOUNT_NAME = "devstoreaccount1";
+    private static final AtomicReference<Exception> STARTUP_EXCEPTION = new AtomicReference<>();
+
+    private GenericContainer<?> azuriteContainer;
+
+    @Override
+    protected void before() throws Throwable {
+        azuriteContainer = new GenericContainer<>(DOCKER_IMAGE_NAME)
+                .withExposedPorts(10000)
+                .withEnv(Map.of("executable", "blob"))
+                .withStartupTimeout(Duration.ofSeconds(30));
+
+        try {
+            azuriteContainer.start();
+        } catch (IllegalStateException e) {
+            STARTUP_EXCEPTION.set(e);
+            throw e;
+        }
+    }
+
+    @Override
+    protected void after() {
+        if (azuriteContainer != null) {
+            azuriteContainer.stop();
+        }
+    }
+
+    @Override
+    public Statement apply(Statement base, Description description) {
+        return new Statement() {
+            @Override
+            public void evaluate() throws Throwable {
+                try {
+                    before();
+                } catch (IllegalStateException e) {
+                    Assume.assumeNoException(STARTUP_EXCEPTION.get());
+                    throw e;
+                }
+
+                List<Throwable> errors = new ArrayList<>();
+                try {
+                    base.evaluate();
+                } catch (Throwable t) {
+                    errors.add(t);
+                } finally {
+                    try {
+                        after();
+                    } catch (Throwable t) {
+                        errors.add(t);
+                    }
+                }
+                MultipleFailureException.assertEmpty(errors);
+            }
+        };
+    }
+
+    public String getBlobEndpoint() {
+        return "http://127.0.0.1:" + getMappedPort() + "/devstoreaccount1";
+    }
+
+    public BlobContainerClient getReadBlobContainerClient(String name) throws BlobStorageException {
+        BlobContainerClient cloud = getCloudStorageAccount(name, AzureRequestOptions.getRetryOptionsDefault());
+        cloud.deleteIfExists();
+        cloud.create();
+        return cloud;
+    }
+
+    public BlobContainerClient getWriteBlobContainerClient(String name) throws BlobStorageException {
+        BlobContainerClient cloud = getCloudStorageAccount(name, AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations());
+        return cloud;
+    }
+
+    public BlobContainerClient getCloudStorageAccount(String containerName, RequestRetryOptions retryOptions) {
+        String blobEndpoint = "BlobEndpoint=" + getBlobEndpoint();
+        String accountName = "AccountName=" + ACCOUNT_NAME;
+        String accountKey = "AccountKey=" + ACCOUNT_KEY;
+
+        AzureHttpRequestLoggingTestingPolicy azureHttpRequestLoggingTestingPolicy = new AzureHttpRequestLoggingTestingPolicy();
+
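+        // Both helper methods above funnel into this one builder; only the
+        // RequestRetryOptions profile differs between the read and the write client.
+        // A minimal usage sketch (variable names illustrative, the helpers are the
+        // ones defined in this rule):
+        //
+        //   BlobContainerClient read = getCloudStorageAccount("oak-test", AzureRequestOptions.getRetryOptionsDefault());
+        //   BlobContainerClient write = getCloudStorageAccount("oak-test", AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations());
+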
+        BlobServiceClient blobServiceClient = new BlobServiceClientBuilder()
+                .endpoint(getBlobEndpoint())
+                .addPolicy(azureHttpRequestLoggingTestingPolicy)
+                .connectionString("DefaultEndpointsProtocol=http;" + accountName + ";" + accountKey + ";" + blobEndpoint)
+                .retryOptions(retryOptions)
+                .buildClient();
+
+        return blobServiceClient.getBlobContainerClient(containerName);
+    }
+
+    public int getMappedPort() {
+        return azuriteContainer.getMappedPort(10000);
+    }
+}
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/MockAzureHttpResponse.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/MockAzureHttpResponse.java
new file mode 100644
index 00000000000..35e82ddb6da
--- /dev/null
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/MockAzureHttpResponse.java
@@ -0,0 +1,62 @@
+package org.apache.jackrabbit.oak.segment.azure;
+
+import com.azure.core.http.HttpHeaders;
+import com.azure.core.http.HttpResponse;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+
+public class MockAzureHttpResponse extends HttpResponse {
+
+    private final int statusCode;
+    private final String body;
+    private HttpHeaders headers;
+
+    public MockAzureHttpResponse(int statusCode, String body) {
+        super(null); // no originating HttpRequest is needed by these tests
+        this.statusCode = statusCode;
+        this.body = body;
+    }
+
+    @Override
+    public int getStatusCode() {
+        return statusCode;
+    }
+
+    @Override
+    public String getHeaderValue(String name) {
+        return null; // Simplified for this example
+    }
+
+    @Override
+    public HttpHeaders getHeaders() {
+        return this.headers;
+    }
+
+    public void setHeaders(HttpHeaders headers) {
+        this.headers = headers;
+    }
+
+    @Override
+    public Flux<ByteBuffer> getBody() {
+        return null; // Simplified for this example
+    }
+
+    @Override
+    public Mono<byte[]> getBodyAsByteArray() {
+        return Mono.just(body.getBytes());
+    }
+
+    @Override
+    public Mono<String> getBodyAsString() {
+        return null; // Simplified for this example
+    }
+
+    @Override
+    public Mono<String> getBodyAsString(Charset charset) {
+        return null; // Simplified for this example
+    }
+}
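The mock above deliberately stubs everything except the status code and the byte-array body, which is all the logging-policy tests consume. A hypothetical test method showing the intended use (the header name, values and assertions are illustrative only, not part of the patch):

    @Test
    public void exerciseMockResponse() {
        MockAzureHttpResponse response = new MockAzureHttpResponse(404, "BlobNotFound");
        response.setHeaders(new HttpHeaders().set("x-ms-error-code", "BlobNotFound"));
        // getBodyAsByteArray() wraps the body in an already-completed Mono
        byte[] payload = response.getBodyAsByteArray().block();
        Assert.assertEquals(404, response.getStatusCode());
        Assert.assertEquals("BlobNotFound".getBytes().length, payload.length);
    }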
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixture.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixture.java
index 2671b1f495b..4229ba3b001 100644
--- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixture.java
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixture.java
@@ -19,22 +19,22 @@
 package org.apache.jackrabbit.oak.segment.azure.fixture;

+import com.azure.storage.blob.BlobContainerClient;
+import com.azure.storage.blob.BlobServiceClient;
+import com.azure.storage.blob.BlobServiceClientBuilder;
+import com.azure.storage.blob.models.BlobStorageException;
+import com.azure.storage.common.policy.RequestRetryOptions;
 import org.apache.jackrabbit.guava.common.io.Files;
-import com.microsoft.azure.storage.CloudStorageAccount;
-import com.microsoft.azure.storage.StorageException;
-import com.microsoft.azure.storage.blob.CloudBlobContainer;
-import com.microsoft.azure.storage.blob.CloudBlobDirectory;
 import org.apache.jackrabbit.oak.fixture.NodeStoreFixture;
 import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
 import org.apache.jackrabbit.oak.segment.azure.AzurePersistence;
+import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptions;
 import org.apache.jackrabbit.oak.segment.file.FileStore;
 import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
 import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;

 import java.io.IOException;
-import java.net.URISyntaxException;
-import java.security.InvalidKeyException;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.UUID;
@@ -49,26 +49,39 @@ public class SegmentAzureFixture extends NodeStoreFixture {

     private Map<NodeStore, FileStore> fileStoreMap = new HashMap<>();

-    private Map<NodeStore, CloudBlobContainer> containerMap = new HashMap<>();
+    private Map<NodeStore, BlobContainerClient> containerMap = new HashMap<>();

     @Override
     public NodeStore createNodeStore() {
         AzurePersistence persistence;
-        CloudBlobContainer container;
+        BlobContainerClient writeBlobContainerClient;
         try {
-            CloudStorageAccount cloud = CloudStorageAccount.parse(AZURE_CONNECTION_STRING);
-
-            while (true) {
-                String containerName = AZURE_CONTAINER + "-" + UUID.randomUUID().toString();
-                container = cloud.createCloudBlobClient().getContainerReference(containerName);
-                if (!container.exists()) {
-                    container.create();
-                    break;
-                }
-            }
-            CloudBlobDirectory directory = container.getDirectoryReference(AZURE_ROOT_PATH);
-            persistence = new AzurePersistence(directory);
-        } catch (StorageException | URISyntaxException | InvalidKeyException e) {
+            String containerName = AZURE_CONTAINER + "-" + UUID.randomUUID().toString();
+
+            String endpoint = String.format("https://%s.blob.core.windows.net", containerName);
+
+            RequestRetryOptions retryOptions = AzureRequestOptions.getRetryOptionsDefault();
+            BlobServiceClient blobServiceClient = new BlobServiceClientBuilder()
+                    .endpoint(endpoint)
+                    .connectionString(AZURE_CONNECTION_STRING)
+                    .retryOptions(retryOptions)
+                    .buildClient();
+
+            BlobContainerClient readBlobContainerClient = blobServiceClient.getBlobContainerClient(containerName);
+
+            RequestRetryOptions writeRetryOptions = AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations();
+            BlobServiceClient writeBlobServiceClient = new BlobServiceClientBuilder()
+                    .endpoint(endpoint)
+                    .connectionString(AZURE_CONNECTION_STRING)
+                    .retryOptions(writeRetryOptions)
+                    .buildClient();
+
+            writeBlobContainerClient = writeBlobServiceClient.getBlobContainerClient(containerName);
+
+            writeBlobContainerClient.createIfNotExists();
+
+            persistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, AZURE_ROOT_PATH);
+        } catch (BlobStorageException e) {
             throw new RuntimeException(e);
         }

@@ -76,7 +89,7 @@ public NodeStore createNodeStore() {
             FileStore fileStore = FileStoreBuilder.fileStoreBuilder(Files.createTempDir()).withCustomPersistence(persistence).build();
             NodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
             fileStoreMap.put(nodeStore, fileStore);
-            containerMap.put(nodeStore, container);
+            containerMap.put(nodeStore, writeBlobContainerClient);
             return nodeStore;
         } catch (IOException | InvalidFileStoreVersionException e) {
             throw new RuntimeException(e);
@@ -89,11 +102,11 @@ public void dispose(NodeStore nodeStore) {
             fs.close();
         }
         try {
-            CloudBlobContainer container = containerMap.remove(nodeStore);
+            BlobContainerClient container = containerMap.remove(nodeStore);
             if (container != null) {
                 container.deleteIfExists();
             }
-        } catch (StorageException e) {
+        } catch (BlobStorageException e) {
             throw new RuntimeException(e);
         }
     }
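The fixture above builds the same service client twice, varying only the retry profile. For reference, that construction can be factored into one helper; a minimal sketch, assuming only the Azure SDK types already imported above (the helper name is illustrative):

    private static BlobContainerClient containerClient(String connectionString, String containerName,
                                                       RequestRetryOptions retryOptions) {
        // One BlobServiceClient per retry profile; the container client inherits its pipeline.
        BlobServiceClient serviceClient = new BlobServiceClientBuilder()
                .connectionString(connectionString)
                .retryOptions(retryOptions)
                .buildClient();
        return serviceClient.getBlobContainerClient(containerName);
    }

createNodeStore() would then call it twice, once with getRetryOptionsDefault() and once with getRetryOperationsOptimiseForWriteOperations().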
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixtureV8.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixtureV8.java
new file mode 100644
index 00000000000..deff5477aab
--- /dev/null
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixtureV8.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.segment.azure.fixture;
+
+import org.apache.jackrabbit.guava.common.io.Files;
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlobDirectory;
+import org.apache.jackrabbit.oak.fixture.NodeStoreFixture;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
+import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8;
+import org.apache.jackrabbit.oak.segment.file.FileStore;
+import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
+import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
+import org.apache.jackrabbit.oak.spi.state.NodeStore;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.security.InvalidKeyException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.UUID;
+
+public class SegmentAzureFixtureV8 extends NodeStoreFixture {
+
+    private static final String AZURE_CONNECTION_STRING = System.getProperty("oak.segment.azure.connection", "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;");
+
+    private static final String AZURE_CONTAINER = System.getProperty("oak.segment.azure.container", "oak");
+
+    private static final String AZURE_ROOT_PATH = System.getProperty("oak.segment.azure.rootPath", "/oak");
+
+    private Map<NodeStore, FileStore> fileStoreMap = new HashMap<>();
+
+    private Map<NodeStore, CloudBlobContainer> containerMap = new HashMap<>();
+
+    @Override
+    public NodeStore createNodeStore() {
+        AzurePersistenceV8 persistence;
+        CloudBlobContainer container;
+        try {
+            CloudStorageAccount cloud = CloudStorageAccount.parse(AZURE_CONNECTION_STRING);
+
+            while (true) {
+                String containerName = AZURE_CONTAINER + "-" + UUID.randomUUID().toString();
+                container = cloud.createCloudBlobClient().getContainerReference(containerName);
+                if (!container.exists()) {
+                    container.create();
+                    break;
+                }
+            }
+            CloudBlobDirectory directory = container.getDirectoryReference(AZURE_ROOT_PATH);
+            persistence = new AzurePersistenceV8(directory);
+        } catch (StorageException | URISyntaxException | InvalidKeyException e) {
+            throw new RuntimeException(e);
+        }
+
+        try {
+            FileStore fileStore = FileStoreBuilder.fileStoreBuilder(Files.createTempDir()).withCustomPersistence(persistence).build();
+            NodeStore nodeStore =
SegmentNodeStoreBuilders.builder(fileStore).build(); + fileStoreMap.put(nodeStore, fileStore); + containerMap.put(nodeStore, container); + return nodeStore; + } catch (IOException | InvalidFileStoreVersionException e) { + throw new RuntimeException(e); + } + } + + public void dispose(NodeStore nodeStore) { + FileStore fs = fileStoreMap.remove(nodeStore); + if (fs != null) { + fs.close(); + } + try { + CloudBlobContainer container = containerMap.remove(nodeStore); + if (container != null) { + container.deleteIfExists(); + } + } catch (StorageException e) { + throw new RuntimeException(e); + } + } + + @Override + public String toString() { + return "SegmentAzure"; + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureJournalReaderTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureJournalReaderTest.java index dcd59155ba4..a7dea9cc474 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureJournalReaderTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureJournalReaderTest.java @@ -16,20 +16,21 @@ */ package org.apache.jackrabbit.oak.segment.azure.journal; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudAppendBlob; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.specialized.AppendBlobClient; +import org.apache.jackrabbit.oak.segment.azure.AzureJournalFile; +import org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule; import org.apache.jackrabbit.oak.segment.file.JournalReader; import org.apache.jackrabbit.oak.segment.file.JournalReaderTest; -import org.apache.jackrabbit.oak.segment.azure.AzureJournalFile; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.junit.Before; import org.junit.ClassRule; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; import java.security.InvalidKeyException; public class AzureJournalReaderTest extends JournalReaderTest { @@ -37,20 +38,25 @@ public class AzureJournalReaderTest extends JournalReaderTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient readBlobContainerClient; + private BlobContainerClient writeBlobContainerClient; @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); + public void setup() throws BlobStorageException, InvalidKeyException, URISyntaxException { + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); } protected JournalReader createJournalReader(String s) throws IOException { try { - CloudAppendBlob blob = container.getAppendBlobReference("journal/journal.log.001"); - blob.createOrReplace(); - blob.appendText(s); - return new JournalReader(new AzureJournalFile(container.getDirectoryReference("journal"), "journal.log", new WriteAccessController())); - } catch (StorageException | URISyntaxException e) { + AppendBlobClient blob = 
writeBlobContainerClient.getBlobClient("journal/journal.log.001").getAppendBlobClient(); + blob.createIfNotExists(); + if (!s.equals("")){ + blob.appendBlock(new ByteArrayInputStream(s.getBytes()), s.length()); + } + + return new JournalReader(new AzureJournalFile(readBlobContainerClient, writeBlobContainerClient, "journal", new WriteAccessController())); + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureTarRevisionsTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureTarRevisionsTest.java index 099a0d23dd0..f49fb889b7c 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureTarRevisionsTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureTarRevisionsTest.java @@ -16,36 +16,37 @@ */ package org.apache.jackrabbit.oak.segment.azure.journal; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; -import org.apache.jackrabbit.oak.segment.file.TarRevisionsTest; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; +import org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.file.TarRevisionsTest; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; import org.junit.Before; import org.junit.ClassRule; import java.io.IOException; -import java.net.URISyntaxException; public class AzureTarRevisionsTest extends TarRevisionsTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient readBlobContainerClient; + private BlobContainerClient writeBlobContainerClient; @Before public void setup() throws Exception { - container = azurite.getContainer("oak-test"); + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); super.setup(); } @Override protected SegmentNodeStorePersistence getPersistence() throws IOException { try { - return new AzurePersistence(container.getDirectoryReference("oak")); - } catch (URISyntaxException e) { + return new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); + } catch (BlobStorageException e) { throw new IOException(e); } } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/ReverseFileReaderTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/ReverseFileReaderTest.java index 6aa742aaf6e..5d347b6f21b 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/ReverseFileReaderTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/ReverseFileReaderTest.java @@ -16,20 +16,18 @@ */ package org.apache.jackrabbit.oak.segment.azure.journal; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudAppendBlob; -import com.microsoft.azure.storage.blob.CloudBlobContainer; - -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; 
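+// The journal tests above and the reader tests below share one append pattern from the
+// new SDK: AppendBlobClient.createIfNotExists() followed by appendBlock(). A minimal
+// sketch (variable names illustrative; note that appendBlock() takes the length in
+// bytes, so the byte[] length is safer than String.length() once non-ASCII data is
+// involved):
+//
+//   AppendBlobClient blob = container.getBlobClient("journal/journal.log.001").getAppendBlobClient();
+//   blob.createIfNotExists();
+//   byte[] bytes = (line + '\n').getBytes(StandardCharsets.UTF_8);
+//   blob.appendBlock(new ByteArrayInputStream(bytes), bytes.length);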
+import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobStorageException; +import org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule; import org.apache.jackrabbit.oak.segment.azure.ReverseFileReader; import org.junit.Assert; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; +import java.io.ByteArrayInputStream; import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -40,50 +38,51 @@ public class ReverseFileReaderTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private CloudBlobContainer container; + private BlobContainerClient container; @Before - public void setup() throws StorageException, InvalidKeyException, URISyntaxException { - container = azurite.getContainer("oak-test"); - getBlob().createOrReplace(); + public void setup() throws BlobStorageException { + container = azurite.getReadBlobContainerClient("oak-test"); + container.getBlobClient("test-blob").getAppendBlobClient().createIfNotExists(); } - private CloudAppendBlob getBlob() throws URISyntaxException, StorageException { - return container.getAppendBlobReference("test-blob"); + private BlobItem getBlob() throws BlobStorageException { + return container.listBlobs().stream().filter(blobItem -> blobItem.getName().equals("test-blob")).findFirst().get(); } @Test - public void testReverseReader() throws IOException, URISyntaxException, StorageException { - List entries = createFile( 1024, 80); - ReverseFileReader reader = new ReverseFileReader(getBlob(), 256); + public void testReverseReader() throws IOException, BlobStorageException { + List entries = createFile(1024, 80); + ReverseFileReader reader = new ReverseFileReader(container, getBlob()); assertEquals(entries, reader); } @Test - public void testEmptyFile() throws IOException, URISyntaxException, StorageException { - List entries = createFile( 0, 80); - ReverseFileReader reader = new ReverseFileReader(getBlob(), 256); + public void testEmptyFile() throws IOException, BlobStorageException { + List entries = createFile(0, 80); + ReverseFileReader reader = new ReverseFileReader(container, getBlob(), 256); assertEquals(entries, reader); } @Test - public void test1ByteBlock() throws IOException, URISyntaxException, StorageException { - List entries = createFile( 10, 16); - ReverseFileReader reader = new ReverseFileReader(getBlob(), 1); + public void test1ByteBlock() throws IOException, BlobStorageException { + List entries = createFile(10, 16); + ReverseFileReader reader = new ReverseFileReader(container, getBlob(), 1); assertEquals(entries, reader); } - private List createFile(int lines, int maxLineLength) throws IOException, URISyntaxException, StorageException { + private List createFile(int lines, int maxLineLength) throws IOException, BlobStorageException { Random random = new Random(); List entries = new ArrayList<>(); - CloudAppendBlob blob = getBlob(); + BlobItem blob = getBlob(); for (int i = 0; i < lines; i++) { int entrySize = random.nextInt(maxLineLength) + 1; String entry = randomString(entrySize); try { - blob.appendText(entry + '\n'); - } catch (StorageException e) { + String text = entry + '\n'; + container.getBlobClient(blob.getName()).getAppendBlobClient().appendBlock(new ByteArrayInputStream(text.getBytes()), text.length()); + } catch (BlobStorageException e) { throw new 
IOException(e); } entries.add(entry); diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureJournalReaderV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureJournalReaderV8Test.java new file mode 100644 index 00000000000..e3ef6bb28bd --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureJournalReaderV8Test.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.journal.v8; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudAppendBlob; +import com.microsoft.azure.storage.blob.CloudBlobContainer; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.file.JournalReader; +import org.apache.jackrabbit.oak.segment.file.JournalReaderTest; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureJournalFileV8; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.junit.Before; +import org.junit.ClassRule; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; + +public class AzureJournalReaderV8Test extends JournalReaderTest { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + public void setup() throws StorageException, InvalidKeyException, URISyntaxException { + container = azurite.getContainer("oak-test"); + } + + protected JournalReader createJournalReader(String s) throws IOException { + try { + CloudAppendBlob blob = container.getAppendBlobReference("journal/journal.log.001"); + blob.createOrReplace(); + blob.appendText(s); + return new JournalReader(new AzureJournalFileV8(container.getDirectoryReference("journal"), "journal.log", new WriteAccessController())); + } catch (StorageException | URISyntaxException e) { + throw new IOException(e); + } + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureTarRevisionsV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureTarRevisionsV8Test.java new file mode 100644 index 00000000000..f304cfb0dfa --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/AzureTarRevisionsV8Test.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.journal.v8; + +import com.microsoft.azure.storage.blob.CloudBlobContainer; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; +import org.apache.jackrabbit.oak.segment.file.TarRevisionsTest; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.junit.Before; +import org.junit.ClassRule; + +import java.io.IOException; +import java.net.URISyntaxException; + +public class AzureTarRevisionsV8Test extends TarRevisionsTest { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + public void setup() throws Exception { + container = azurite.getContainer("oak-test"); + super.setup(); + } + + @Override + protected SegmentNodeStorePersistence getPersistence() throws IOException { + try { + return new AzurePersistenceV8(container.getDirectoryReference("oak")); + } catch (URISyntaxException e) { + throw new IOException(e); + } + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/ReverseFileReaderV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/ReverseFileReaderV8Test.java new file mode 100644 index 00000000000..10dd3d05a72 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/v8/ReverseFileReaderV8Test.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.azure.journal.v8; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudAppendBlob; +import com.microsoft.azure.storage.blob.CloudBlobContainer; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.azure.v8.ReverseFileReaderV8; +import org.junit.Assert; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Random; + +public class ReverseFileReaderV8Test { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + public void setup() throws StorageException, InvalidKeyException, URISyntaxException { + container = azurite.getContainer("oak-test"); + getBlob().createOrReplace(); + } + + private CloudAppendBlob getBlob() throws URISyntaxException, StorageException { + return container.getAppendBlobReference("test-blob"); + } + + @Test + public void testReverseReader() throws IOException, URISyntaxException, StorageException { + List entries = createFile( 1024, 80); + ReverseFileReaderV8 reader = new ReverseFileReaderV8(getBlob(), 256); + assertEquals(entries, reader); + } + + @Test + public void testEmptyFile() throws IOException, URISyntaxException, StorageException { + List entries = createFile( 0, 80); + ReverseFileReaderV8 reader = new ReverseFileReaderV8(getBlob(), 256); + assertEquals(entries, reader); + } + + @Test + public void test1ByteBlock() throws IOException, URISyntaxException, StorageException { + List entries = createFile( 10, 16); + ReverseFileReaderV8 reader = new ReverseFileReaderV8(getBlob(), 1); + assertEquals(entries, reader); + } + + + private List createFile(int lines, int maxLineLength) throws IOException, URISyntaxException, StorageException { + Random random = new Random(); + List entries = new ArrayList<>(); + CloudAppendBlob blob = getBlob(); + for (int i = 0; i < lines; i++) { + int entrySize = random.nextInt(maxLineLength) + 1; + String entry = randomString(entrySize); + try { + blob.appendText(entry + '\n'); + } catch (StorageException e) { + throw new IOException(e); + } + entries.add(entry); + } + + entries.add(""); + Collections.reverse(entries); + return entries; + } + + private static void assertEquals(List entries, ReverseFileReaderV8 reader) throws IOException { + int i = entries.size(); + for (String e : entries) { + Assert.assertEquals("line " + (--i), e, reader.readLine()); + } + Assert.assertNull(reader.readLine()); + } + + private static String randomString(int entrySize) { + Random r = new Random(); + + StringBuilder result = new StringBuilder(); + for (int i = 0; i < entrySize; i++) { + result.append((char) ('a' + r.nextInt('z' - 'a'))); + } + + return result.toString(); + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/ToolUtilsTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/ToolUtilsTest.java index 4ce192c794e..4fbb51555e2 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/ToolUtilsTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/tool/ToolUtilsTest.java @@ -37,8 +37,8 @@ import 
com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.blob.CloudBlobDirectory; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; -import org.apache.jackrabbit.oak.segment.azure.AzureUtilities; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.jetbrains.annotations.NotNull; import org.junit.After; @@ -48,11 +48,11 @@ import org.mockito.MockedStatic; import org.slf4j.LoggerFactory; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_ACCOUNT_NAME; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_ID; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_SECRET; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_SECRET_KEY; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_TENANT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_ACCOUNT_NAME; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_SECRET; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_SECRET_KEY; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_TENANT_ID; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; @@ -76,16 +76,16 @@ public class ToolUtilsTest { public static final String AZURE_SECRET_KEY_WARNING = "AZURE_CLIENT_ID, AZURE_CLIENT_SECRET and AZURE_TENANT_ID environment variables empty or missing. 
Switching to authentication with AZURE_SECRET_KEY."; private final TestEnvironment environment = new TestEnvironment(); - private AzureStorageCredentialManager azureStorageCredentialManager; + private AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8; @Before public void init() { - this.azureStorageCredentialManager = new AzureStorageCredentialManager(); + this.azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); } @After public void clear() { - this.azureStorageCredentialManager.close(); + this.azureStorageCredentialManagerV8.close(); } @Test @@ -96,7 +96,7 @@ public void createCloudBlobDirectoryWithAccessKey() { StorageCredentialsAccountAndKey credentials = expectCredentials( StorageCredentialsAccountAndKey.class, - () -> ToolUtils.createCloudBlobDirectory(DEFAULT_SEGMENT_STORE_PATH, environment, azureStorageCredentialManager), + () -> ToolUtils.createCloudBlobDirectory(DEFAULT_SEGMENT_STORE_PATH, environment, azureStorageCredentialManagerV8), DEFAULT_CONTAINER_URL ); @@ -112,7 +112,7 @@ public void createCloudBlobDirectoryWithAccessKey() { public void createCloudBlobDirectoryFailsWhenAccessKeyNotPresent() { environment.setVariable(AZURE_SECRET_KEY, null); assertThrows(IllegalArgumentException.class, () -> - ToolUtils.createCloudBlobDirectory(DEFAULT_SEGMENT_STORE_PATH, environment, azureStorageCredentialManager) + ToolUtils.createCloudBlobDirectory(DEFAULT_SEGMENT_STORE_PATH, environment, azureStorageCredentialManagerV8) ); } @@ -120,7 +120,7 @@ public void createCloudBlobDirectoryFailsWhenAccessKeyNotPresent() { public void createCloudBlobDirectoryFailsWhenAccessKeyIsInvalid() { environment.setVariable(AZURE_SECRET_KEY, "invalid"); assertThrows(IllegalArgumentException.class, () -> - ToolUtils.createCloudBlobDirectory(DEFAULT_SEGMENT_STORE_PATH, environment, azureStorageCredentialManager) + ToolUtils.createCloudBlobDirectory(DEFAULT_SEGMENT_STORE_PATH, environment, azureStorageCredentialManagerV8) ); } @@ -130,7 +130,7 @@ public void createCloudBlobDirectoryWithSasUri() { StorageCredentialsSharedAccessSignature credentials = expectCredentials( StorageCredentialsSharedAccessSignature.class, - () -> ToolUtils.createCloudBlobDirectory(DEFAULT_SEGMENT_STORE_PATH + '?' + sasToken, azureStorageCredentialManager), + () -> ToolUtils.createCloudBlobDirectory(DEFAULT_SEGMENT_STORE_PATH + '?' 
+ sasToken, azureStorageCredentialManagerV8), DEFAULT_CONTAINER_URL ); @@ -149,17 +149,17 @@ public void createCloudBlobDirectoryWithServicePrincipal() throws URISyntaxExcep String containerName = "oak"; String segmentStorePath = String.format(SEGMENT_STORE_PATH_FORMAT, accountName, containerName, DEFAULT_REPO_DIR); - CloudBlobDirectory cloudBlobDirectory = ToolUtils.createCloudBlobDirectory(segmentStorePath, ENVIRONMENT, azureStorageCredentialManager); + CloudBlobDirectory cloudBlobDirectory = ToolUtils.createCloudBlobDirectory(segmentStorePath, ENVIRONMENT, azureStorageCredentialManagerV8); assertNotNull(cloudBlobDirectory); assertEquals(containerName, cloudBlobDirectory.getContainer().getName()); } private static T expectCredentials(Class clazz, Runnable body, String containerUrl) { ArgumentCaptor credentialsCaptor = ArgumentCaptor.forClass(clazz); - try (MockedStatic mockedAzureUtilities = mockStatic(AzureUtilities.class)) { + try (MockedStatic mockedAzureUtilities = mockStatic(AzureUtilitiesV8.class)) { body.run(); - mockedAzureUtilities.verify(() -> AzureUtilities.cloudBlobDirectoryFrom( + mockedAzureUtilities.verify(() -> AzureUtilitiesV8.cloudBlobDirectoryFrom( credentialsCaptor.capture(), eq(containerUrl), eq(DEFAULT_REPO_DIR) diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8Test.java similarity index 63% rename from oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsTest.java rename to oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8Test.java index 7bbfd391a19..0f9a825da8a 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptionsV8Test.java @@ -27,7 +27,7 @@ import static org.junit.Assert.assertEquals; -public class AzureRequestOptionsTest { +public class AzureRequestOptionsV8Test { private BlobRequestOptions blobRequestOptions; @@ -38,41 +38,41 @@ public void setUp() { @Test public void testApplyDefaultRequestOptions() { - AzureRequestOptions.applyDefaultRequestOptions(blobRequestOptions); - assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptions.DEFAULT_TIMEOUT_EXECUTION)), Long.valueOf(blobRequestOptions.getMaximumExecutionTimeInMs())); - assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptions.DEFAULT_TIMEOUT_INTERVAL)), Long.valueOf(blobRequestOptions.getTimeoutIntervalInMs())); + AzureRequestOptionsV8.applyDefaultRequestOptions(blobRequestOptions); + assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptionsV8.DEFAULT_TIMEOUT_EXECUTION)), Long.valueOf(blobRequestOptions.getMaximumExecutionTimeInMs())); + assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptionsV8.DEFAULT_TIMEOUT_INTERVAL)), Long.valueOf(blobRequestOptions.getTimeoutIntervalInMs())); } @Test public void testApplyDefaultRequestOptionsWithCustomTimeouts() { - System.setProperty(AzureRequestOptions.TIMEOUT_EXECUTION_PROP, "10"); - System.setProperty(AzureRequestOptions.TIMEOUT_INTERVAL_PROP, "5"); + System.setProperty(AzureRequestOptionsV8.TIMEOUT_EXECUTION_PROP, "10"); + System.setProperty(AzureRequestOptionsV8.TIMEOUT_INTERVAL_PROP, "5"); - AzureRequestOptions.applyDefaultRequestOptions(blobRequestOptions); + 
AzureRequestOptionsV8.applyDefaultRequestOptions(blobRequestOptions); assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(10)), Long.valueOf(blobRequestOptions.getMaximumExecutionTimeInMs())); assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(5)), Long.valueOf(blobRequestOptions.getTimeoutIntervalInMs())); - System.clearProperty(AzureRequestOptions.TIMEOUT_EXECUTION_PROP); - System.clearProperty(AzureRequestOptions.TIMEOUT_INTERVAL_PROP); + System.clearProperty(AzureRequestOptionsV8.TIMEOUT_EXECUTION_PROP); + System.clearProperty(AzureRequestOptionsV8.TIMEOUT_INTERVAL_PROP); } @Test public void testOptimiseForWriteOperations() { - BlobRequestOptions writeBlobRequestoptions = AzureRequestOptions.optimiseForWriteOperations(blobRequestOptions); - assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptions.DEFAULT_TIMEOUT_EXECUTION)), Long.valueOf(writeBlobRequestoptions.getMaximumExecutionTimeInMs())); - assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptions.DEFAULT_TIMEOUT_INTERVAL)), Long.valueOf(writeBlobRequestoptions.getTimeoutIntervalInMs())); + BlobRequestOptions writeBlobRequestoptions = AzureRequestOptionsV8.optimiseForWriteOperations(blobRequestOptions); + assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptionsV8.DEFAULT_TIMEOUT_EXECUTION)), Long.valueOf(writeBlobRequestoptions.getMaximumExecutionTimeInMs())); + assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(AzureRequestOptionsV8.DEFAULT_TIMEOUT_INTERVAL)), Long.valueOf(writeBlobRequestoptions.getTimeoutIntervalInMs())); } @Test public void testOptimiseForWriteOperationsWithCustomTimeouts() { - System.setProperty(AzureRequestOptions.WRITE_TIMEOUT_EXECUTION_PROP, "10"); - System.setProperty(AzureRequestOptions.WRITE_TIMEOUT_INTERVAL_PROP, "5"); + System.setProperty(AzureRequestOptionsV8.WRITE_TIMEOUT_EXECUTION_PROP, "10"); + System.setProperty(AzureRequestOptionsV8.WRITE_TIMEOUT_INTERVAL_PROP, "5"); - BlobRequestOptions writeBlobRequestoptions = AzureRequestOptions.optimiseForWriteOperations(blobRequestOptions); + BlobRequestOptions writeBlobRequestoptions = AzureRequestOptionsV8.optimiseForWriteOperations(blobRequestOptions); assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(10)), Long.valueOf(writeBlobRequestoptions.getMaximumExecutionTimeInMs())); assertEquals(Long.valueOf(TimeUnit.SECONDS.toMillis(5)), Long.valueOf(writeBlobRequestoptions.getTimeoutIntervalInMs())); - System.clearProperty(AzureRequestOptions.WRITE_TIMEOUT_EXECUTION_PROP); - System.clearProperty(AzureRequestOptions.WRITE_TIMEOUT_INTERVAL_PROP); + System.clearProperty(AzureRequestOptionsV8.WRITE_TIMEOUT_EXECUTION_PROP); + System.clearProperty(AzureRequestOptionsV8.WRITE_TIMEOUT_INTERVAL_PROP); } } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java new file mode 100644 index 00000000000..4fb086d05f4 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8Test.java @@ -0,0 +1,587 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.StorageErrorCodeStrings; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlockBlob; +import com.microsoft.azure.storage.blob.CloudBlobDirectory; +import com.microsoft.azure.storage.blob.CloudBlob; +import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.microsoft.azure.storage.blob.ListBlobItem; +import org.apache.jackrabbit.oak.api.CommitFailedException; +import org.apache.jackrabbit.oak.api.PropertyState; +import org.apache.jackrabbit.oak.api.Type; +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.commons.Buffer; +import org.apache.jackrabbit.oak.segment.SegmentId; +import org.apache.jackrabbit.oak.segment.SegmentNodeStore; +import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; +import org.apache.jackrabbit.oak.segment.SegmentNotFoundException; +import org.apache.jackrabbit.oak.segment.file.FileStore; +import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; +import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; +import org.apache.jackrabbit.oak.segment.file.ReadOnlyFileStore; +import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.spi.RepositoryNotReachableException; +import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveReader; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; +import org.apache.jackrabbit.oak.segment.spi.persistence.persistentcache.AbstractPersistentCache; +import org.apache.jackrabbit.oak.segment.spi.persistence.persistentcache.CachingPersistence; +import org.apache.jackrabbit.oak.segment.spi.persistence.persistentcache.PersistentCache; +import org.apache.jackrabbit.oak.segment.spi.persistence.split.SplitPersistence; +import org.apache.jackrabbit.oak.spi.commit.CommitInfo; +import org.apache.jackrabbit.oak.spi.commit.EmptyHook; +import org.apache.jackrabbit.oak.spi.state.NodeBuilder; +import org.junit.*; +import org.junit.contrib.java.lang.system.ProvideSystemProperty; +import org.junit.rules.TemporaryFolder; +import org.mockito.Mockito; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeoutException; + +import static 
org.apache.jackrabbit.guava.common.collect.Lists.newArrayList; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.core.IsNot.not; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public class AzureArchiveManagerV8Test { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + @Rule + public TemporaryFolder folder = new TemporaryFolder(new File("target")); + + private CloudBlobContainer container; + + private AzurePersistenceV8 azurePersistenceV8; + + @Before + public void setup() throws StorageException, InvalidKeyException, URISyntaxException { + container = azurite.getContainer("oak-test"); + + WriteAccessController writeAccessController = new WriteAccessController(); + writeAccessController.enableWriting(); + azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak")); + azurePersistenceV8.setWriteAccessController(writeAccessController); + } + + @Rule + public final ProvideSystemProperty systemPropertyRule = new ProvideSystemProperty(AzureRepositoryLockV8.LEASE_DURATION_PROP, "15") + .and(AzureRepositoryLockV8.RENEWAL_INTERVAL_PROP, "3") + .and(AzureRepositoryLockV8.TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, "9"); + + @Test + public void testRecovery() throws StorageException, URISyntaxException, IOException { + SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + SegmentArchiveWriter writer = manager.create("data00000a.tar"); + + List uuids = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + UUID u = UUID.randomUUID(); + writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); + uuids.add(u); + } + + writer.flush(); + writer.close(); + + container.getBlockBlobReference("oak/data00000a.tar/0005." + uuids.get(5).toString()).delete(); + + LinkedHashMap recovered = new LinkedHashMap<>(); + manager.recoverEntries("data00000a.tar", recovered); + assertEquals(uuids.subList(0, 5), newArrayList(recovered.keySet())); + } + + @Test + public void testBackupWithRecoveredEntries() throws StorageException, URISyntaxException, IOException { + SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + SegmentArchiveWriter writer = manager.create("data00000a.tar"); + + List uuids = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + UUID u = UUID.randomUUID(); + writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); + uuids.add(u); + } + + writer.flush(); + writer.close(); + + container.getBlockBlobReference("oak/data00000a.tar/0005." + uuids.get(5).toString()).delete(); + + LinkedHashMap recovered = new LinkedHashMap<>(); + manager.recoverEntries("data00000a.tar", recovered); + + manager.backup("data00000a.tar", "data00000a.tar.bak", recovered.keySet()); + + for (int i = 0; i <= 4; i++) { + assertTrue(container.getBlockBlobReference("oak/data00000a.tar/000"+ i + "." 
+ uuids.get(i)).exists()); + } + + for (int i = 5; i <= 9; i++) { + assertFalse(String.format("Segment %s.??? should have been deleted.", "oak/data00000a.tar/000"+ i), container.getBlockBlobReference("oak/data00000a.tar/000"+ i + "." + uuids.get(i)).exists()); + } + } + + @Test + public void testUncleanStop() throws URISyntaxException, IOException, InvalidFileStoreVersionException, CommitFailedException, StorageException { + AzurePersistenceV8 p = new AzurePersistenceV8(container.getDirectoryReference("oak")); + FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); + NodeBuilder builder = segmentNodeStore.getRoot().builder(); + builder.setProperty("foo", "bar"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fs.close(); + + container.getBlockBlobReference("oak/data00000a.tar/closed").delete(); + container.getBlockBlobReference("oak/data00000a.tar/data00000a.tar.brf").delete(); + container.getBlockBlobReference("oak/data00000a.tar/data00000a.tar.gph").delete(); + + fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); + assertEquals("bar", segmentNodeStore.getRoot().getString("foo")); + fs.close(); + } + + @Test + // see OAK-8566 + public void testUncleanStopWithEmptyArchive() throws URISyntaxException, IOException, InvalidFileStoreVersionException, CommitFailedException, StorageException { + AzurePersistenceV8 p = new AzurePersistenceV8(container.getDirectoryReference("oak")); + FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); + NodeBuilder builder = segmentNodeStore.getRoot().builder(); + builder.setProperty("foo", "bar"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fs.close(); + + // make sure there are 2 archives + fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); + builder = segmentNodeStore.getRoot().builder(); + builder.setProperty("foo2", "bar2"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fs.close(); + + // remove the segment 0000 from the second archive + ListBlobItem segment0000 = container.listBlobs("oak/data00001a.tar/0000.").iterator().next(); + ((CloudBlob) segment0000).delete(); + container.getBlockBlobReference("oak/data00001a.tar/closed").delete(); + + fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); + assertEquals("bar", segmentNodeStore.getRoot().getString("foo")); + fs.close(); + } + + @Test + public void testUncleanStopSegmentMissing() throws URISyntaxException, IOException, InvalidFileStoreVersionException, CommitFailedException, StorageException { + AzurePersistenceV8 p = new AzurePersistenceV8(container.getDirectoryReference("oak")); + FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); + NodeBuilder builder = segmentNodeStore.getRoot().builder(); + builder.setProperty("foo", "bar"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + 
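// first revision persisted; the store is closed and reopened below to force creation of a second archive +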
fs.close(); + + // make sure there are 2 archives + fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); + builder = segmentNodeStore.getRoot().builder(); + builder.setProperty("foo0", "bar0"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fs.flush(); + //create segment 0001 + builder.setProperty("foo1", "bar1"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fs.flush(); + //create segment 0002 + builder.setProperty("foo2", "bar2"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fs.flush(); + //create segment 0003 + builder.setProperty("foo3", "bar3"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fs.flush(); + fs.close(); + + // remove the segment 0002 from the second archive + ListBlobItem segment0002 = container.listBlobs("oak/data00001a.tar/0002.").iterator().next(); + ((CloudBlob) segment0002).delete(); + container.getBlockBlobReference("oak/data00001a.tar/closed").delete(); + + fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); + assertEquals("bar", segmentNodeStore.getRoot().getString("foo")); + + //recovered archive data00001a.tar should not contain segments 0002 and 0003 + assertFalse(container.listBlobs("oak/data00001a.tar/0002.").iterator().hasNext()); + assertFalse(container.listBlobs("oak/data00001a.tar/0003.").iterator().hasNext()); + + assertTrue("Backup directory should have been created", container.listBlobs("oak/data00001a.tar.bak").iterator().hasNext()); + //backup has all segments but 0002 since it was deleted before recovery + assertTrue(container.listBlobs("oak/data00001a.tar.bak/0001.").iterator().hasNext()); + assertFalse(container.listBlobs("oak/data00001a.tar.bak/0002.").iterator().hasNext()); + assertTrue(container.listBlobs("oak/data00001a.tar.bak/0003.").iterator().hasNext()); + + //verify content from recovered segments preserved + assertEquals("bar1", segmentNodeStore.getRoot().getString("foo1")); + //content from deleted segments not preserved + assertNull(segmentNodeStore.getRoot().getString("foo2")); + assertNull(segmentNodeStore.getRoot().getString("foo3")); + fs.close(); + } + + @Test + public void testExists() throws IOException, URISyntaxException { + SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + SegmentArchiveWriter writer = manager.create("data00000a.tar"); + + List uuids = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + UUID u = UUID.randomUUID(); + writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); + uuids.add(u); + } + + writer.flush(); + writer.close(); + + Assert.assertTrue(manager.exists("data00000a.tar")); + Assert.assertFalse(manager.exists("data00001a.tar")); + } + + @Test + public void testArchiveExistsAfterFlush() throws URISyntaxException, IOException { + SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + SegmentArchiveWriter writer = manager.create("data00000a.tar"); + + Assert.assertFalse(manager.exists("data00000a.tar")); + UUID u = UUID.randomUUID(); + 
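// the archive only becomes visible remotely once flush() uploads the first segment +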
writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); + writer.flush(); + Assert.assertTrue(manager.exists("data00000a.tar")); + } + + @Test(expected = FileNotFoundException.class) + public void testSegmentDeletedAfterCreatingReader() throws IOException, URISyntaxException, StorageException { + SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + SegmentArchiveWriter writer = manager.create("data00000a.tar"); + + Assert.assertFalse(manager.exists("data00000a.tar")); + UUID u = UUID.randomUUID(); + writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); + writer.flush(); + writer.close(); + + SegmentArchiveReader reader = manager.open("data00000a.tar"); + Buffer segment = reader.readSegment(u.getMostSignificantBits(), u.getLeastSignificantBits()); + assertNotNull(segment); + + ListBlobItem segment0000 = container.listBlobs("oak/data00000a.tar/0000.").iterator().next(); + ((CloudBlob) segment0000).delete(); + + try { + // FileNotFoundException should be thrown here + reader.readSegment(u.getMostSignificantBits(), u.getLeastSignificantBits()); + fail(); + } catch (RepositoryNotReachableException e) { + fail(); + } + } + + @Test(expected = SegmentNotFoundException.class) + public void testMissingSegmentDetectedInFileStore() throws IOException, StorageException, URISyntaxException, InvalidFileStoreVersionException { + + AzurePersistenceV8 azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak")); + FileStore fileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(azurePersistenceV8).build(); + + SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + SegmentArchiveWriter writer = manager.create("data00000a.tar"); + + //Assert.assertFalse(manager.exists("data00000a.tar")); + UUID u = UUID.randomUUID(); + writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); + writer.flush(); + writer.close(); + + SegmentArchiveReader reader = manager.open("data00000a.tar"); + Buffer segment = reader.readSegment(u.getMostSignificantBits(), u.getLeastSignificantBits()); + assertNotNull(segment); + + ListBlobItem segment0000 = container.listBlobs("oak/data00000a.tar/0000.").iterator().next(); + ((CloudBlob) segment0000).delete(); + + // SegmentNotFoundException should be thrown here + fileStore.readSegment(new SegmentId(fileStore, u.getMostSignificantBits(), u.getLeastSignificantBits())); + } + + @Test + public void testReadOnlyRecovery() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { + AzurePersistenceV8 rwPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); + FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build(); + SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); + NodeBuilder builder = segmentNodeStore.getRoot().builder(); + builder.setProperty("foo", "bar"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + rwFileStore.flush(); + +
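// the RW store has created its first archive; no recovery backup (*.ro.bak) exists yet +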
assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); + assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); + + // create read-only FS + AzurePersistenceV8 roPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); + ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly(); + + PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() + .getRoot() + .getProperty("foo"); + assertThat(fooProperty, not(nullValue())); + assertThat(fooProperty.getValue(Type.STRING), equalTo("bar")); + + roFileStore.close(); + rwFileStore.close(); + + assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); + // after creating a read-only FS, the recovery procedure should not be started since there is another running Oak process + assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); + } + + @Test + public void testCachingPersistenceTarRecovery() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { + AzurePersistenceV8 rwPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); + FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(rwPersistence).build(); + SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); + NodeBuilder builder = segmentNodeStore.getRoot().builder(); + builder.setProperty("foo", "bar"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + rwFileStore.flush(); + + assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); + assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); + + // create file store with split persistence + AzurePersistenceV8 azureSharedPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); + + CachingPersistence cachingPersistence = new CachingPersistence(createPersistenceCache(), azureSharedPersistence); + File localFolder = folder.newFolder(); + SegmentNodeStorePersistence localPersistence = new TarPersistence(localFolder); + SegmentNodeStorePersistence splitPersistence = new SplitPersistence(cachingPersistence, localPersistence); + + // exception should not be thrown here + FileStore splitPersistenceFileStore = FileStoreBuilder.fileStoreBuilder(localFolder).withCustomPersistence(splitPersistence).build(); + + assertTrue(container.getDirectoryReference("oak/data00000a.tar").listBlobs().iterator().hasNext()); + // after creating a file store with split persistence, the recovery procedure should not be started since there is another running Oak process + assertFalse(container.getDirectoryReference("oak/data00000a.tar.ro.bak").listBlobs().iterator().hasNext()); + } + + @Test + public void testCollectBlobReferencesForReadOnlyFileStore() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { + AzurePersistenceV8 rwPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); + try (FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build()) { + SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); +
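// write some content first, so segments exist when the read-only store is opened below +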
NodeBuilder builder = segmentNodeStore.getRoot().builder(); + builder.setProperty("foo", "bar"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + rwFileStore.flush(); + + // file with binary references is not created yet + assertFalse("brf file should not be present", container.getDirectoryReference("oak/data00000a.tar").getBlockBlobReference("data00000a.tar.brf").exists()); + + // create read-only FS, while the rw FS is still open + AzurePersistenceV8 roPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); + try (ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly()) { + + PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() + .getRoot() + .getProperty("foo"); + + assertThat(fooProperty, not(nullValue())); + assertThat(fooProperty.getValue(Type.STRING), equalTo("bar")); + + assertDoesNotThrow(() -> roFileStore.collectBlobReferences(s -> { + })); + } + } + } + + @Test + public void testCollectBlobReferencesDoesNotFailWhenFileIsMissing() throws URISyntaxException, InvalidFileStoreVersionException, IOException, CommitFailedException, StorageException { + AzurePersistenceV8 rwPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); + try (FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build()) { + SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); + NodeBuilder builder = segmentNodeStore.getRoot().builder(); + builder.setProperty("foo", "bar"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + rwFileStore.flush(); + + // file with binary references is not created yet + assertFalse("brf file should not be present", container.getDirectoryReference("oak/data00000a.tar").getBlockBlobReference("data00000a.tar.brf").exists()); + + // create read-only FS, while the rw FS is still open + AzurePersistenceV8 roPersistence = new AzurePersistenceV8(container.getDirectoryReference("oak")); + try (ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly()) { + + PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() + .getRoot() + .getProperty("foo"); + + assertThat(fooProperty, not(nullValue())); + assertThat(fooProperty.getValue(Type.STRING), equalTo("bar")); + + HashSet<String> references = new HashSet<>(); + assertDoesNotThrow(() -> + roFileStore.collectBlobReferences(references::add)); + + assertTrue("No references should have been collected since reference file has not been created", references.isEmpty()); + } + } + } + + @Test + public void testWriteAfterLosingRepoLock() throws Exception { + CloudBlobDirectory oakDirectory = container.getDirectoryReference("oak"); + AzurePersistenceV8 rwPersistence = new AzurePersistenceV8(oakDirectory); + + CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); + + CloudBlockBlob blobMocked = Mockito.spy(blob); + + Mockito + .doCallRealMethod() + .when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + + AzurePersistenceV8 mockedRwPersistence = Mockito.spy(rwPersistence); + WriteAccessController writeAccessController = new WriteAccessController(); + AzureRepositoryLockV8 azureRepositoryLockV8 = new AzureRepositoryLockV8(blobMocked, () -> {}, writeAccessController); + AzureArchiveManagerV8 azureArchiveManagerV8 = new
AzureArchiveManagerV8(oakDirectory, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), writeAccessController); + + + Mockito + .doAnswer(invocation -> azureRepositoryLockV8.lock()) + .when(mockedRwPersistence).lockRepository(); + + Mockito + .doReturn(azureArchiveManagerV8) + .when(mockedRwPersistence).createArchiveManager(Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.any(), Mockito.any(), Mockito.any()); + Mockito + .doReturn(new AzureJournalFileV8(oakDirectory, "journal.log", writeAccessController)) + .when(mockedRwPersistence).getJournalFile(); + + FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(mockedRwPersistence).build(); + SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); + NodeBuilder builder = segmentNodeStore.getRoot().builder(); + + + // simulate operation timeout when trying to renew lease + Mockito.reset(blobMocked); + + StorageException storageException = + new StorageException(StorageErrorCodeStrings.OPERATION_TIMED_OUT, "operation timeout", new TimeoutException()); + + Mockito.doThrow(storageException).when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + + + // wait till lease expires + Thread.sleep(17000); + + // try updating repository + Thread thread = new Thread(() -> { + try { + builder.setProperty("foo", "bar"); + segmentNodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + rwFileStore.flush(); + } catch (Exception e) { + fail("No Exception expected, but got: " + e.getMessage()); + } + }); + thread.start(); + + Thread.sleep(2000); + + // It should be possible to start another RW file store. + FileStore rwFileStore2 = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(new AzurePersistenceV8(oakDirectory)).build(); + SegmentNodeStore segmentNodeStore2 = SegmentNodeStoreBuilders.builder(rwFileStore2).build(); + NodeBuilder builder2 = segmentNodeStore2.getRoot().builder(); + + //repository hasn't been updated + assertNull(builder2.getProperty("foo")); + + rwFileStore2.close(); + } + + private PersistentCache createPersistenceCache() { + return new AbstractPersistentCache() { + @Override + protected Buffer readSegmentInternal(long msb, long lsb) { + return null; + } + + @Override + public boolean containsSegment(long msb, long lsb) { + return false; + } + + @Override + public void writeSegment(long msb, long lsb, Buffer buffer) { + + } + + @Override + public void cleanUp() { + + } + }; + } + + private static void assertDoesNotThrow(Executable executable) { + try { + executable.execute(); + } catch (Exception e) { + fail("No Exception expected, but got: " + e.getMessage()); + } + } + + interface Executable { + void execute() throws Exception; + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalV8Test.java new file mode 100644 index 00000000000..f431ea194b3 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureGCJournalV8Test.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlobContainer; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; +import org.apache.jackrabbit.oak.segment.file.GcJournalTest; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Ignore; +import org.junit.Test; + +import java.net.URISyntaxException; +import java.security.InvalidKeyException; + +public class AzureGCJournalV8Test extends GcJournalTest { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + public void setup() throws StorageException, InvalidKeyException, URISyntaxException { + container = azurite.getContainer("oak-test"); + } + + @Override + protected SegmentNodeStorePersistence getPersistence() throws Exception { + return new AzurePersistenceV8(container.getDirectoryReference("oak")); + } + + @Test + @Ignore + @Override + public void testReadOak16GCLog() throws Exception { + super.testReadOak16GCLog(); + } + + @Test + @Ignore + @Override + public void testUpdateOak16GCLog() throws Exception { + super.testUpdateOak16GCLog(); + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileConcurrencyIT.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8ConcurrencyIT.java similarity index 94% rename from oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileConcurrencyIT.java rename to oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8ConcurrencyIT.java index 415c1193401..6344b390bb3 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFileConcurrencyIT.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8ConcurrencyIT.java @@ -14,7 +14,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.jackrabbit.oak.segment.azure; +package org.apache.jackrabbit.oak.segment.azure.v8; import com.microsoft.azure.storage.CloudStorageAccount; import com.microsoft.azure.storage.StorageException; @@ -39,14 +39,14 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -public class AzureJournalFileConcurrencyIT { - private static final Logger log = LoggerFactory.getLogger(AzureJournalFileConcurrencyIT.class); +public class AzureJournalFileV8ConcurrencyIT { + private static final Logger log = LoggerFactory.getLogger(AzureJournalFileV8ConcurrencyIT.class); private static CloudBlobContainer container; private static int suffix; - private AzurePersistence persistence; + private AzurePersistenceV8 persistence; @BeforeClass public static void connectToAzure() throws URISyntaxException, InvalidKeyException, StorageException { @@ -60,7 +60,7 @@ public static void connectToAzure() throws URISyntaxException, InvalidKeyExcepti @Before public void setup() throws StorageException, InvalidKeyException, URISyntaxException, IOException, InterruptedException { - persistence = new AzurePersistence(container.getDirectoryReference("oak-" + (suffix++))); + persistence = new AzurePersistenceV8(container.getDirectoryReference("oak-" + (suffix++))); writeJournalLines(300, 0); log.info("Finished writing initial content to journal!"); } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8Test.java new file mode 100644 index 00000000000..6496ca239e2 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8Test.java @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudAppendBlob; +import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.microsoft.azure.storage.blob.ListBlobItem; +import java.util.stream.IntStream; +import org.apache.commons.lang3.time.StopWatch; +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileReader; +import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFileWriter; +import org.jetbrains.annotations.NotNull; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; +import java.util.ArrayList; +import java.util.List; + +import static org.apache.jackrabbit.guava.common.collect.Lists.reverse; +import static java.util.stream.Collectors.toList; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class AzureJournalFileV8Test { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + private AzureJournalFileV8 journal; + + @Before + public void setup() throws StorageException, InvalidKeyException, URISyntaxException { + container = azurite.getContainer("oak-test"); + WriteAccessController writeAccessController = new WriteAccessController(); + writeAccessController.enableWriting(); + journal = new AzureJournalFileV8(container.getDirectoryReference("journal"), "journal.log", writeAccessController, 50); + } + + @Test + public void testSplitJournalFiles() throws IOException, URISyntaxException, StorageException { + assertFalse(journal.exists()); + + int index = 0; + index = writeNLines(index, 10); // 10 + assertTrue(journal.exists()); + assertEquals(1, countJournalBlobs()); + + index = writeNLines(index, 20); // 30 + assertEquals(1, countJournalBlobs()); + + index = writeNLines(index, 30); // 60 + assertEquals(2, countJournalBlobs()); + + index = writeNLines(index, 100); // 160 + assertEquals(4, countJournalBlobs()); + + assertJournalEntriesCount(index); + } + + private int countJournalBlobs() throws URISyntaxException, StorageException { + List<CloudAppendBlob> result = new ArrayList<>(); + for (ListBlobItem b : container.getDirectoryReference("journal").listBlobs("journal.log")) { + if (b instanceof CloudAppendBlob) { + result.add((CloudAppendBlob) b); + } + } + return result.size(); + } + + private int writeNLines(int index, int n) throws IOException { + try (JournalFileWriter writer = journal.openJournalWriter()) { + for (int i = 0; i < n; i++) { + writer.writeLine("line " + (index++)); + } + } + return index; + } + + @Test + public void testTruncateJournalFile() throws IOException { + assertFalse(journal.exists()); + + List<String> lines = buildLines(0, 100); + try (JournalFileWriter writer = journal.openJournalWriter()) { + writer.batchWriteLines(lines); + } + + assertTrue(journal.exists()); + assertJournalEntriesCount(100); + + try (JournalFileWriter writer = journal.openJournalWriter()) { + writer.truncate(); + } + + assertTrue(journal.exists()); + assertJournalEntriesCount(0); + } + + @Test + public void testBatchWriteLines() throws IOException { + List<String> lines = buildLines(0, 5000); + + try (JournalFileWriter writer =
journal.openJournalWriter()) { + writer.batchWriteLines(lines); + } + + List<String> entries = readEntriesFromJournal(); + assertEquals(lines, reverse(entries)); + } + + @Test + public void testEnsureBatchWriteLinesIsFasterThanNaiveImplementation() throws IOException { + List<String> lines = buildLines(0, 100); + + StopWatch watchNaiveImpl = StopWatch.createStarted(); + try (JournalFileWriter writer = journal.openJournalWriter()) { + // Emulating previous naive implementation of 'batchWriteLines', which simply delegated to 'writeLine()' + for (String line : lines) { + writer.writeLine(line); + } + } + watchNaiveImpl.stop(); + + StopWatch watchOptimizedImpl = StopWatch.createStarted(); + try (JournalFileWriter writer = journal.openJournalWriter()) { + writer.batchWriteLines(lines); + } + watchOptimizedImpl.stop(); + long optimizedImplTime = watchOptimizedImpl.getTime(); + long naiveImplTime = watchNaiveImpl.getTime(); + assertTrue("batchWriteLines() should be significantly faster (>10x) than the naive implementation, but took " + + optimizedImplTime + "ms while naive implementation took " + naiveImplTime + "ms", optimizedImplTime < naiveImplTime / 10); + } + + @Test + public void testBatchWriteLines_splitJournalFile() throws Exception { + assertFalse(journal.exists()); + + try (JournalFileWriter writer = journal.openJournalWriter()) { + writer.batchWriteLines(buildLines(0, 30)); // 30 + } + assertTrue(journal.exists()); + assertEquals(1, countJournalBlobs()); + + try (JournalFileWriter writer = journal.openJournalWriter()) { + writer.batchWriteLines(buildLines(30, 40)); // 70 + } + assertEquals(2, countJournalBlobs()); + + try (JournalFileWriter writer = journal.openJournalWriter()) { + writer.batchWriteLines(buildLines(70, 30)); // 100 + } + assertEquals(2, countJournalBlobs()); + + try (JournalFileWriter writer = journal.openJournalWriter()) { + writer.batchWriteLines(buildLines(100, 1)); // 101 + } + assertEquals(3, countJournalBlobs()); + + try (JournalFileWriter writer = journal.openJournalWriter()) { + writer.batchWriteLines(buildLines(101, 100)); // 201 + } + assertEquals(5, countJournalBlobs()); + + assertJournalEntriesCount(201); + } + + private void assertJournalEntriesCount(int index) throws IOException { + List<String> entries = readEntriesFromJournal(); + assertEquals(buildLines(0, index), reverse(entries)); + } + + @NotNull + private static List<String> buildLines(int start, int count) { + return IntStream.range(start, count + start) + .mapToObj(i -> "line " + i) + .collect(toList()); + } + + @NotNull + private List<String> readEntriesFromJournal() throws IOException { + List<String> result = new ArrayList<>(); + try (JournalFileReader reader = journal.openJournalReader()) { + String entry; + while ((entry = reader.readLine()) != null) { + result.add(entry); + } + } + return result; + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8Test.java new file mode 100644 index 00000000000..8dc01a2c26a --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureManifestFileV8Test.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlobContainer; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.spi.persistence.ManifestFile; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; +import java.util.Properties; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +public class AzureManifestFileV8Test { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + public void setup() throws StorageException, InvalidKeyException, URISyntaxException { + container = azurite.getContainer("oak-test"); + } + + @Test + public void testManifest() throws URISyntaxException, IOException { + ManifestFile manifestFile = new AzurePersistenceV8(container.getDirectoryReference("oak")).getManifestFile(); + assertFalse(manifestFile.exists()); + + Properties props = new Properties(); + props.setProperty("xyz", "abc"); + props.setProperty("version", "123"); + manifestFile.save(props); + + Properties loaded = manifestFile.load(); + assertEquals(props, loaded); + } + +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureReadSegmentV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureReadSegmentV8Test.java new file mode 100644 index 00000000000..ff3a2d422f6 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureReadSegmentV8Test.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.microsoft.azure.storage.blob.CloudBlobDirectory; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.commons.Buffer; +import org.apache.jackrabbit.oak.segment.SegmentId; +import org.apache.jackrabbit.oak.segment.SegmentNotFoundException; +import org.apache.jackrabbit.oak.segment.file.FileStore; +import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; +import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; +import org.apache.jackrabbit.oak.segment.spi.RepositoryNotReachableException; +import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor; +import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; +import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitor; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveReader; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; + +public class AzureReadSegmentV8Test { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + public void setup() throws StorageException, InvalidKeyException, URISyntaxException { + container = azurite.getContainer("oak-test"); + } + + @Test(expected = SegmentNotFoundException.class) + public void testReadNonExistentSegmentRepositoryReachable() throws URISyntaxException, IOException, InvalidFileStoreVersionException, StorageException { + AzurePersistenceV8 p = new AzurePersistenceV8(container.getDirectoryReference("oak")); + FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + SegmentId id = new SegmentId(fs, 0, 0); + + try { + fs.readSegment(id); + } finally { + fs.close(); + } + } + + @Test(expected = RepositoryNotReachableException.class) + public void testReadExistentSegmentRepositoryNotReachable() throws URISyntaxException, IOException, InvalidFileStoreVersionException, StorageException { + AzurePersistenceV8 p = new ReadFailingAzurePersistenceV8(container.getDirectoryReference("oak")); + FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); + + SegmentId id = new SegmentId(fs, 0, 0); + byte[] buffer = new byte[2]; + + try { + fs.writeSegment(id, buffer, 0, 2); + fs.readSegment(id); + } finally { + fs.close(); + } + } + + static class ReadFailingAzurePersistenceV8 extends AzurePersistenceV8 { + public ReadFailingAzurePersistenceV8(CloudBlobDirectory segmentStoreDirectory) { + super(segmentStoreDirectory); + } + + @Override + public SegmentArchiveManager createArchiveManager(boolean mmap, boolean offHeapAccess, IOMonitor ioMonitor, + FileStoreMonitor fileStoreMonitor, RemoteStoreMonitor remoteStoreMonitor) { + return new AzureArchiveManagerV8(segmentstoreDirectory, ioMonitor, fileStoreMonitor, writeAccessController) { + @Override + public SegmentArchiveReader open(String archiveName) throws IOException { + CloudBlobDirectory archiveDirectory = getDirectory(archiveName); + return new 
AzureSegmentArchiveReaderV8(archiveDirectory, ioMonitor) { + @Override + public Buffer readSegment(long msb, long lsb) throws IOException { + throw new RepositoryNotReachableException( + new RuntimeException("Cannot access Azure storage")); + } + }; + } + + @Override + public SegmentArchiveWriter create(String archiveName) throws IOException { + CloudBlobDirectory archiveDirectory = getDirectory(archiveName); + return new AzureSegmentArchiveWriterV8(archiveDirectory, ioMonitor, fileStoreMonitor, writeAccessController) { + @Override + public Buffer readSegment(long msb, long lsb) throws IOException { + throw new RepositoryNotReachableException( + new RuntimeException("Cannot access Azure storage")); } + }; + } + }; + } + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8Test.java new file mode 100644 index 00000000000..d645a1743f0 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8Test.java @@ -0,0 +1,171 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.StorageErrorCodeStrings; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.microsoft.azure.storage.blob.CloudBlockBlob; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.spi.persistence.RepositoryLock; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.contrib.java.lang.system.ProvideSystemProperty; +import org.mockito.Mockito; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeoutException; + +import static org.junit.Assert.*; + +public class AzureRepositoryLockV8Test { + + private static final Logger log = LoggerFactory.getLogger(AzureRepositoryLockV8Test.class); + public static final String LEASE_DURATION = "15"; + public static final String RENEWAL_INTERVAL = "3"; + public static final String TIME_TO_WAIT_BEFORE_BLOCK = "9"; + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + public void setup() throws StorageException, InvalidKeyException, URISyntaxException { + container = azurite.getContainer("oak-test"); + } + + @Rule + public final ProvideSystemProperty systemPropertyRule = new ProvideSystemProperty(AzureRepositoryLockV8.LEASE_DURATION_PROP, LEASE_DURATION) + .and(AzureRepositoryLockV8.RENEWAL_INTERVAL_PROP, RENEWAL_INTERVAL) + .and(AzureRepositoryLockV8.TIME_TO_WAIT_BEFORE_WRITE_BLOCK_PROP, TIME_TO_WAIT_BEFORE_BLOCK); + + @Test + public void testFailingLock() throws URISyntaxException, IOException, StorageException { + CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); + new AzureRepositoryLockV8(blob, () -> {}, new WriteAccessController()).lock(); + try { + new AzureRepositoryLockV8(blob, () -> {}, new WriteAccessController()).lock(); + fail("The second lock should fail."); + } catch (IOException e) { + // it's fine + } + } + + @Test + public void testWaitingLock() throws URISyntaxException, IOException, StorageException, InterruptedException { + CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); + Semaphore s = new Semaphore(0); + new Thread(() -> { + try { + RepositoryLock lock = new AzureRepositoryLockV8(blob, () -> {}, new WriteAccessController()).lock(); + s.release(); + Thread.sleep(1000); + lock.unlock(); + } catch (Exception e) { + log.error("Can't lock or unlock the repo", e); + } + }).start(); + + s.acquire(); + new AzureRepositoryLockV8(blob, () -> {}, new WriteAccessController(), 10).lock(); + } + + @Test + public void testLeaseRefreshUnsuccessful() throws URISyntaxException, StorageException, IOException, InterruptedException { + CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); + + CloudBlockBlob blobMocked = Mockito.spy(blob); + + // instrument the mock to throw the exception twice when renewing the lease + StorageException storageException = + new StorageException(StorageErrorCodeStrings.OPERATION_TIMED_OUT, "operation timeout", new TimeoutException()); + Mockito.doThrow(storageException) + .doThrow(storageException) + .doCallRealMethod() + 
.when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + + new AzureRepositoryLockV8(blobMocked, () -> {}, new WriteAccessController()).lock(); + + // wait till lease expires + Thread.sleep(16000); + + // reset the mock to default behaviour + Mockito.doCallRealMethod().when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + + try { + new AzureRepositoryLockV8(blobMocked, () -> {}, new WriteAccessController()).lock(); + fail("The second lock should fail."); + } catch (IOException e) { + // it's fine + } + } + + @Test + public void testWritesBlockedOnlyAfterFewUnsuccessfulAttempts() throws Exception { + + CloudBlockBlob blob = container.getBlockBlobReference("oak/repo.lock"); + + CloudBlockBlob blobMocked = Mockito.spy(blob); + + // instrument the mock to throw the exception twice when renewing the lease + StorageException storageException = + new StorageException(StorageErrorCodeStrings.OPERATION_TIMED_OUT, "operation timeout", new TimeoutException()); + Mockito + .doCallRealMethod() + .doThrow(storageException) + .when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + + + WriteAccessController writeAccessController = new WriteAccessController(); + + new AzureRepositoryLockV8(blobMocked, () -> {}, writeAccessController).lock(); + + + Thread thread = new Thread(() -> { + + while (true) { + writeAccessController.checkWritingAllowed(); + + } + }); + + thread.start(); + + Thread.sleep(3000); + assertFalse("after 3 seconds thread should not be in a waiting state", thread.getState().equals(Thread.State.WAITING)); + + Thread.sleep(3000); + assertFalse("after 6 seconds thread should not be in a waiting state", thread.getState().equals(Thread.State.WAITING)); + + Thread.sleep(5000); + assertTrue("after more than 9 seconds thread should be in a waiting state", thread.getState().equals(Thread.State.WAITING)); + + Mockito.doCallRealMethod().when(blobMocked).renewLease(Mockito.any(), Mockito.any(), Mockito.any()); + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java new file mode 100644 index 00000000000..2a46b309a5c --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java @@ -0,0 +1,223 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlobContainer; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; +import org.jetbrains.annotations.NotNull; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.mockserver.client.MockServerClient; +import org.mockserver.junit.MockServerRule; +import org.mockserver.matchers.Times; +import org.mockserver.model.BinaryBody; +import org.mockserver.model.HttpRequest; +import shaded_package.org.apache.http.client.utils.URIBuilder; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.UUID; + +import static org.junit.Assert.assertThrows; +import static org.mockserver.model.HttpRequest.request; +import static org.mockserver.model.HttpResponse.response; +import static org.mockserver.verify.VerificationTimes.exactly; + +public class AzureSegmentArchiveWriterV8Test { + public static final String BASE_PATH = "/devstoreaccount1/oak-test"; + public static final int MAX_ATTEMPTS = 3; + + @Rule + public MockServerRule mockServerRule = new MockServerRule(this); + + @SuppressWarnings("unused") + private MockServerClient mockServerClient; + + private CloudBlobContainer container; + + @Before + public void setUp() throws Exception { + container = createCloudBlobContainer(); + + System.setProperty("azure.segment.archive.writer.retries.intervalMs", "100"); + System.setProperty("azure.segment.archive.writer.retries.max", Integer.toString(MAX_ATTEMPTS)); + + // Disable Azure SDK own retry mechanism used by AzureSegmentArchiveWriter + System.setProperty("segment.azure.retry.attempts", "0"); + System.setProperty("segment.timeout.execution", "1"); + } + + @Test + public void retryWhenFailureOnWriteBinaryReferences_eventuallySucceed() throws Exception { + SegmentArchiveWriter writer = createSegmentArchiveWriter(); + writeAndFlushSegment(writer); + + HttpRequest writeBinaryReferencesRequest = getWriteBinaryReferencesRequest(); + // fail twice + mockServerClient + .when(writeBinaryReferencesRequest, Times.exactly(2)) + .respond(response().withStatusCode(500)); + // then succeed + mockServerClient + .when(writeBinaryReferencesRequest, Times.once()) + .respond(response().withStatusCode(201)); + + writer.writeBinaryReferences(new byte[10]); + + mockServerClient.verify(writeBinaryReferencesRequest, exactly(MAX_ATTEMPTS)); + } + + @Test + public void retryWhenFailureOnWriteGraph_eventuallySucceed() throws Exception { + SegmentArchiveWriter writer = createSegmentArchiveWriter(); + writeAndFlushSegment(writer); + + HttpRequest writeGraphRequest = getWriteGraphRequest(); + // fail twice + mockServerClient + .when(writeGraphRequest, Times.exactly(2)) + .respond(response().withStatusCode(500)); + // then succeed + mockServerClient + .when(writeGraphRequest, Times.once()) + .respond(response().withStatusCode(201)); + + writer.writeGraph(new byte[10]); + + mockServerClient.verify(writeGraphRequest, exactly(MAX_ATTEMPTS)); + } + + @Test + public void 
retryWhenFailureOnClose_eventuallySucceed() throws Exception { + SegmentArchiveWriter writer = createSegmentArchiveWriter(); + writeAndFlushSegment(writer); + + HttpRequest closeArchiveRequest = getCloseArchiveRequest(); + // fail twice + mockServerClient + .when(closeArchiveRequest, Times.exactly(2)) + .respond(response().withStatusCode(500)); + // then succeed + mockServerClient + .when(closeArchiveRequest, Times.once()) + .respond(response().withStatusCode(201)); + + writer.close(); + + mockServerClient.verify(closeArchiveRequest, exactly(MAX_ATTEMPTS)); + } + + @Test + public void retryWhenFailureOnClose_failAfterLastRetryAttempt() throws Exception { + SegmentArchiveWriter writer = createSegmentArchiveWriter(); + writeAndFlushSegment(writer); + + HttpRequest closeArchiveRequest = getCloseArchiveRequest(); + // always fail + mockServerClient + .when(closeArchiveRequest, Times.unlimited()) + .respond(response().withStatusCode(500)); + + + assertThrows(IOException.class, writer::close); + + mockServerClient.verify(closeArchiveRequest, exactly(MAX_ATTEMPTS)); + } + + + private void writeAndFlushSegment(SegmentArchiveWriter writer) throws IOException { + expectWriteRequests(); + UUID u = UUID.randomUUID(); + writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); + writer.flush(); + } + + private void expectWriteRequests() { + mockServerClient + .when(getUploadSegmentDataRequest(), Times.once()) + .respond(response().withStatusCode(201)); + + mockServerClient + .when(getUploadSegmentMetadataRequest(), Times.once()) + .respond(response().withStatusCode(200)); + } + + @NotNull + private SegmentArchiveWriter createSegmentArchiveWriter() throws URISyntaxException, IOException { + WriteAccessController writeAccessController = new WriteAccessController(); + writeAccessController.enableWriting(); + AzurePersistenceV8 azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak")); + azurePersistenceV8.setWriteAccessController(writeAccessController); + SegmentArchiveManager manager = azurePersistenceV8.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + SegmentArchiveWriter writer = manager.create("data00000a.tar"); + return writer; + } + + private static HttpRequest getCloseArchiveRequest() { + return request() + .withMethod("PUT") + .withPath(BASE_PATH + "/oak/data00000a.tar/closed"); + } + + private static HttpRequest getWriteBinaryReferencesRequest() { + return request() + .withMethod("PUT") + .withPath(BASE_PATH + "/oak/data00000a.tar/data00000a.tar.brf"); + } + + private static HttpRequest getWriteGraphRequest() { + return request() + .withMethod("PUT") + .withPath(BASE_PATH + "/oak/data00000a.tar/data00000a.tar.gph"); + } + + private static HttpRequest getUploadSegmentMetadataRequest() { + return request() + .withMethod("PUT") + .withPath(BASE_PATH + "/oak/data00000a.tar/.*") + .withQueryStringParameter("comp", "metadata"); + } + + private static HttpRequest getUploadSegmentDataRequest() { + return request() + .withMethod("PUT") + .withPath(BASE_PATH + "/oak/data00000a.tar/.*") + .withBody(new BinaryBody(new byte[10])); + } + + @NotNull + private CloudBlobContainer createCloudBlobContainer() throws URISyntaxException, StorageException { + URI uri = new URIBuilder() + .setScheme("http") + .setHost(mockServerClient.remoteAddress().getHostName()) + .setPort(mockServerClient.remoteAddress().getPort()) + .setPath(BASE_PATH) + .build(); + +
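// the container URI points at the local mock server, so every request issued by the SDK can be stubbed and verified +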
return new CloudBlobContainer(uri); + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreServiceV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreServiceV8Test.java new file mode 100644 index 00000000000..de8e5c13bb2 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreServiceV8Test.java @@ -0,0 +1,286 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import org.apache.jackrabbit.guava.common.collect.ImmutableSet; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.*; +import java.io.IOException; +import java.net.URISyntaxException; +import java.time.Duration; +import java.time.Instant; +import java.util.Date; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Set; +import java.util.stream.StreamSupport; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.azure.AzureSegmentStoreService; +import org.apache.jackrabbit.oak.segment.azure.Configuration; +import org.apache.jackrabbit.oak.segment.azure.util.Environment; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; +import org.apache.sling.testing.mock.osgi.junit.OsgiContext; +import org.jetbrains.annotations.NotNull; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.osgi.util.converter.Converters; + +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_ACCOUNT_NAME; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_SECRET; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_TENANT_ID; + +import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.ADD; +import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.CREATE; +import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.LIST; +import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.READ; +import static com.microsoft.azure.storage.blob.SharedAccessBlobPermissions.WRITE; +import static java.util.stream.Collectors.toSet; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeNotNull; + +public class AzureSegmentStoreServiceV8Test { + private static final Environment 
ENVIRONMENT = new Environment(); + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + @Rule + public final OsgiContext context = new OsgiContext(); + + private static final EnumSet<SharedAccessBlobPermissions> READ_ONLY = EnumSet.of(READ, LIST); + private static final EnumSet<SharedAccessBlobPermissions> READ_WRITE = EnumSet.of(READ, LIST, CREATE, WRITE, ADD); + private static final ImmutableSet<String> BLOBS = ImmutableSet.of("blob1", "blob2"); + + private CloudBlobContainer container; + + @Before + public void setup() throws Exception { + container = azurite.getContainer(AzureSegmentStoreService.DEFAULT_CONTAINER_NAME); + for (String blob : BLOBS) { + container.getBlockBlobReference(blob + ".txt").uploadText(blob); + } + } + + @Test + public void connectWithSharedAccessSignatureURL_readOnly() throws Exception { + String sasToken = container.generateSharedAccessSignature(policy(READ_ONLY), null); + + AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); + azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithSharedAccessSignature(sasToken)); + + SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); + assertNotNull(persistence); + assertWriteAccessNotGranted(persistence); + assertReadAccessGranted(persistence, BLOBS); + } + + @Test + public void connectWithSharedAccessSignatureURL_readWrite() throws Exception { + String sasToken = container.generateSharedAccessSignature(policy(READ_WRITE), null); + + AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); + azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithSharedAccessSignature(sasToken)); + + SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); + assertNotNull(persistence); + assertWriteAccessGranted(persistence); + assertReadAccessGranted(persistence, concat(BLOBS, "test")); + } + + @Test + public void connectWithSharedAccessSignatureURL_expired() throws Exception { + SharedAccessBlobPolicy expiredPolicy = policy(READ_WRITE, yesterday()); + String sasToken = container.generateSharedAccessSignature(expiredPolicy, null); + + AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); + azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithSharedAccessSignature(sasToken)); + + SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); + assertNotNull(persistence); + assertWriteAccessNotGranted(persistence); + assertReadAccessNotGranted(persistence); + } + + @Test + public void connectWithAccessKey() throws Exception { + AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); + azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithAccessKey(AzuriteDockerRule.ACCOUNT_KEY)); + + SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); + assertNotNull(persistence); + assertWriteAccessGranted(persistence); + assertReadAccessGranted(persistence, concat(BLOBS, "test")); + } + + @Test + public void connectWithConnectionURL() throws Exception { + AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); + azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithConfigurationURL(AzuriteDockerRule.ACCOUNT_KEY)); + + SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); + assertNotNull(persistence); + 
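// The connection URL embeds the account key, so both write and read access are expected +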
assertWriteAccessGranted(persistence); + assertReadAccessGranted(persistence, concat(BLOBS, "test")); + } + + @Test + public void connectWithServicePrincipal() throws Exception { + // Note: make sure blob1.txt and blob2.txt are uploaded to + // AZURE_ACCOUNT_NAME/oak before running this test + + assumeNotNull(ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME)); + assumeNotNull(ENVIRONMENT.getVariable(AZURE_TENANT_ID)); + assumeNotNull(ENVIRONMENT.getVariable(AZURE_CLIENT_ID)); + assumeNotNull(ENVIRONMENT.getVariable(AZURE_CLIENT_SECRET)); + + AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); + String accountName = ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME); + String tenantId = ENVIRONMENT.getVariable(AZURE_TENANT_ID); + String clientId = ENVIRONMENT.getVariable(AZURE_CLIENT_ID); + String clientSecret = ENVIRONMENT.getVariable(AZURE_CLIENT_SECRET); + azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithServicePrincipal(accountName, clientId, clientSecret, tenantId)); + + SegmentNodeStorePersistence persistence = context.getService(SegmentNodeStorePersistence.class); + assertNotNull(persistence); + assertWriteAccessGranted(persistence); + assertReadAccessGranted(persistence, concat(BLOBS, "test")); + } + + @Test + public void deactivate() throws Exception { + AzureSegmentStoreService azureSegmentStoreService = new AzureSegmentStoreService(); + azureSegmentStoreService.activate(context.componentContext(), getConfigurationWithAccessKey(AzuriteDockerRule.ACCOUNT_KEY)); + assertNotNull(context.getService(SegmentNodeStorePersistence.class)); + + azureSegmentStoreService.deactivate(); + assertNull(context.getService(SegmentNodeStorePersistence.class)); + } + + @NotNull + private static SharedAccessBlobPolicy policy(EnumSet<SharedAccessBlobPermissions> permissions, Instant expirationTime) { + SharedAccessBlobPolicy sharedAccessBlobPolicy = new SharedAccessBlobPolicy(); + sharedAccessBlobPolicy.setPermissions(permissions); + sharedAccessBlobPolicy.setSharedAccessExpiryTime(Date.from(expirationTime)); + return sharedAccessBlobPolicy; + } + + @NotNull + private static SharedAccessBlobPolicy policy(EnumSet<SharedAccessBlobPermissions> permissions) { + return policy(permissions, Instant.now().plus(Duration.ofDays(7))); + } + + private static void assertReadAccessGranted(SegmentNodeStorePersistence persistence, Set<String> expectedBlobs) throws Exception { + CloudBlobContainer container = getContainerFrom(persistence); + Set<String> actualBlobNames = StreamSupport.stream(container.listBlobs().spliterator(), false) + .map(blob -> blob.getUri().getPath()) + .map(path -> path.substring(path.lastIndexOf('/') + 1)) + .filter(name -> name.equals("test.txt") || name.startsWith("blob")) + .collect(toSet()); + Set<String> expectedBlobNames = expectedBlobs.stream().map(name -> name + ".txt").collect(toSet()); + + assertEquals(expectedBlobNames, actualBlobNames); + + Set<String> actualBlobContent = actualBlobNames.stream() + .map(name -> { + try { + return container.getBlockBlobReference(name).downloadText(); + } catch (StorageException | IOException | URISyntaxException e) { + throw new RuntimeException("Error while reading blob " + name, e); + } + }) + .collect(toSet()); + assertEquals(expectedBlobs, actualBlobContent); + } + + private static void assertWriteAccessGranted(SegmentNodeStorePersistence persistence) throws Exception { + getContainerFrom(persistence) + .getBlockBlobReference("test.txt").uploadText("test"); + } + + private static CloudBlobContainer getContainerFrom(SegmentNodeStorePersistence persistence) throws Exception { + return 
((AzurePersistenceV8) persistence).getSegmentstoreDirectory().getContainer(); + } + + private static void assertWriteAccessNotGranted(SegmentNodeStorePersistence persistence) { + try { + assertWriteAccessGranted(persistence); + fail("Write access should not be granted, but writing to the storage succeeded."); + } catch (Exception e) { + // expected: the write must be rejected + } + } + + private static void assertReadAccessNotGranted(SegmentNodeStorePersistence persistence) { + try { + assertReadAccessGranted(persistence, BLOBS); + fail("Read access should not be granted, but reading from the storage succeeded."); + } catch (Exception e) { + // expected: the read must be rejected + } + } + + private static Instant yesterday() { + return Instant.now().minus(Duration.ofDays(1)); + } + + private static ImmutableSet<String> concat(ImmutableSet<String> blobs, String element) { + return ImmutableSet.<String>builder().addAll(blobs).add(element).build(); + } + + private static Configuration getConfigurationWithSharedAccessSignature(String sasToken) { + return getConfiguration(sasToken, AzuriteDockerRule.ACCOUNT_NAME, null, null, null, null, null); + } + + private static Configuration getConfigurationWithAccessKey(String accessKey) { + return getConfiguration(null, AzuriteDockerRule.ACCOUNT_NAME, accessKey, null, null, null, null); + } + + private static Configuration getConfigurationWithConfigurationURL(String accessKey) { + String connectionString = "DefaultEndpointsProtocol=https;" + + "BlobEndpoint=" + azurite.getBlobEndpoint() + ';' + + "AccountName=" + AzuriteDockerRule.ACCOUNT_NAME + ';' + + "AccountKey=" + accessKey + ';'; + return getConfiguration(null, AzuriteDockerRule.ACCOUNT_NAME, null, connectionString, null, null, null); + } + + private static Configuration getConfigurationWithServicePrincipal(String accountName, String clientId, String clientSecret, String tenantId) { + return getConfiguration(null, accountName, null, null, clientId, clientSecret, tenantId); + } + + @NotNull + private static Configuration getConfiguration(String sasToken, String accountName, String accessKey, String connectionURL, String clientId, String clientSecret, String tenantId) { + return Converters.standardConverter() + .convert(new HashMap<String, Object>() {{ + put("accountName", accountName); + put("accessKey", accessKey); + put("connectionURL", connectionURL); + put("sharedAccessSignature", sasToken); + put("clientId", clientId); + put("clientSecret", clientSecret); + put("tenantId", tenantId); + put("blobEndpoint", azurite.getBlobEndpoint()); + }}) + .to(Configuration.class); + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFileV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFileV8Test.java new file mode 100644 index 00000000000..55d0d270a6e --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFileV8Test.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlobContainer; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; +import org.apache.jackrabbit.oak.segment.file.tar.TarFileTest; +import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Ignore; +import org.junit.Test; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; + +public class AzureTarFileV8Test extends TarFileTest { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + @Override + public void setUp() throws IOException { + try { + container = azurite.getContainer("oak-test"); + AzurePersistenceV8 azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak")); + WriteAccessController writeAccessController = new WriteAccessController(); + writeAccessController.enableWriting(); + azurePersistenceV8.setWriteAccessController(writeAccessController); + archiveManager = azurePersistenceV8.createArchiveManager(true, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + } catch (StorageException | InvalidKeyException | URISyntaxException e) { + throw new IOException(e); + } + } + + @Override + protected long getWriteAndReadExpectedSize() { + return 45; + } + + @Test + @Ignore + @Override + public void graphShouldBeTrimmedDownOnSweep() throws Exception { + super.graphShouldBeTrimmedDownOnSweep(); + } +} \ No newline at end of file diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFilesV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFilesV8Test.java new file mode 100644 index 00000000000..d17e3862001 --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarFilesV8Test.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.blob.CloudBlobContainer; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; +import org.apache.jackrabbit.oak.segment.file.tar.TarFiles; +import org.apache.jackrabbit.oak.segment.file.tar.TarFilesTest; +import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; +import org.junit.Before; +import org.junit.ClassRule; + +public class AzureTarFilesV8Test extends TarFilesTest { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + @Override + public void setUp() throws Exception { + container = azurite.getContainer("oak-test"); + AzurePersistenceV8 azurePersistenceV8 = new AzurePersistenceV8(container.getDirectoryReference("oak")); + WriteAccessController writeAccessController = new WriteAccessController(); + writeAccessController.enableWriting(); + azurePersistenceV8.setWriteAccessController(writeAccessController); + tarFiles = TarFiles.builder() + .withDirectory(folder.newFolder()) + .withTarRecovery((id, data, recovery) -> { + // Intentionally left blank + }) + .withIOMonitor(new IOMonitorAdapter()) + .withFileStoreMonitor(new FileStoreMonitorAdapter()) + .withRemoteStoreMonitor(new RemoteStoreMonitorAdapter()) + .withMaxFileSize(MAX_FILE_SIZE) + .withPersistence(azurePersistenceV8) + .build(); + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarWriterV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarWriterV8Test.java new file mode 100644 index 00000000000..18421c74e7c --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureTarWriterV8Test.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.azure.v8; + +import com.microsoft.azure.storage.blob.CloudBlobContainer; + +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; +import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; +import org.apache.jackrabbit.oak.segment.file.tar.TarWriterTest; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter; +import org.jetbrains.annotations.NotNull; +import org.junit.Before; +import org.junit.ClassRule; + +import java.io.IOException; + +public class AzureTarWriterV8Test extends TarWriterTest { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + private CloudBlobContainer container; + + @Before + public void setUp() throws Exception { + container = azurite.getContainer("oak-test"); + } + + @NotNull + @Override + protected SegmentArchiveManager getSegmentArchiveManager() throws Exception { + WriteAccessController writeAccessController = new WriteAccessController(); + writeAccessController.enableWriting(); + AzureArchiveManagerV8 azureArchiveManagerV8 = new AzureArchiveManagerV8(container.getDirectoryReference("oak"), new IOMonitorAdapter(), monitor, writeAccessController); + return azureArchiveManagerV8; + } + + @NotNull + @Override + protected SegmentArchiveManager getFailingSegmentArchiveManager() throws Exception { + final WriteAccessController writeAccessController = new WriteAccessController(); + writeAccessController.enableWriting(); + return new AzureArchiveManagerV8(container.getDirectoryReference("oak"), new IOMonitorAdapter(), monitor, writeAccessController) { + @Override + public SegmentArchiveWriter create(String archiveName) throws IOException { + return new AzureSegmentArchiveWriterV8(getDirectory(archiveName), ioMonitor, monitor, writeAccessController) { + @Override + public void writeGraph(@NotNull byte[] data) throws IOException { + throw new IOException("test"); + } + }; + } + }; + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java index 04d5fa0967b..151b825b996 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java @@ -16,25 +16,17 @@ */ package org.apache.jackrabbit.oak.segment.spi.persistence.split; -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; -import java.util.HashSet; -import java.util.Random; -import java.util.Set; - -import com.microsoft.azure.storage.StorageException; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; import org.apache.jackrabbit.oak.api.Blob; import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; import org.apache.jackrabbit.oak.commons.collections.CollectionUtils; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; import 
org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore; import org.apache.jackrabbit.oak.segment.SegmentNodeStore; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; +import org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule; import org.apache.jackrabbit.oak.segment.file.FileStore; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; @@ -45,13 +37,18 @@ import org.apache.jackrabbit.oak.spi.commit.EmptyHook; import org.apache.jackrabbit.oak.spi.state.NodeBuilder; import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; +import org.junit.*; import org.junit.rules.TemporaryFolder; +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; +import java.util.HashSet; +import java.util.Random; +import java.util.Set; + import static org.junit.Assert.assertEquals; public class SplitPersistenceBlobTest { @@ -75,9 +72,12 @@ public class SplitPersistenceBlobTest { private SegmentNodeStorePersistence splitPersistence; @Before - public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, StorageException { + public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, BlobStorageException { + BlobContainerClient readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + BlobContainerClient writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + SegmentNodeStorePersistence sharedPersistence = - new AzurePersistence(azurite.getContainer("oak-test").getDirectoryReference("oak")); + new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); File dataStoreDir = new File(folder.getRoot(), "blobstore"); BlobStore blobStore = newBlobStore(dataStoreDir); diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java index 48db9845256..d6c01da7bd0 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java @@ -16,12 +16,13 @@ */ package org.apache.jackrabbit.oak.segment.spi.persistence.split; -import com.microsoft.azure.storage.StorageException; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.BlobStorageException; import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; import org.apache.jackrabbit.oak.segment.SegmentNodeStore; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; +import org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule; import org.apache.jackrabbit.oak.segment.file.FileStore; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; import 
org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; @@ -37,11 +38,7 @@ import org.apache.jackrabbit.oak.spi.commit.CommitInfo; import org.apache.jackrabbit.oak.spi.commit.EmptyHook; import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; +import org.junit.*; import org.junit.rules.TemporaryFolder; import java.io.File; @@ -70,8 +67,11 @@ public class SplitPersistenceTest { private SegmentNodeStorePersistence splitPersistence; @Before - public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, StorageException { - SegmentNodeStorePersistence sharedPersistence = new AzurePersistence(azurite.getContainer("oak-test").getDirectoryReference("oak")); + public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, BlobStorageException { + BlobContainerClient readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); + BlobContainerClient writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + + SegmentNodeStorePersistence sharedPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); baseFileStore = FileStoreBuilder .fileStoreBuilder(folder.newFolder()) diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceBlobV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceBlobV8Test.java new file mode 100644 index 00000000000..7b6f1d1e4bd --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceBlobV8Test.java @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.spi.persistence.split.v8; + +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; +import java.util.HashSet; +import java.util.Random; +import java.util.Set; + +import com.microsoft.azure.storage.StorageException; +import org.apache.jackrabbit.oak.api.Blob; +import org.apache.jackrabbit.oak.api.CommitFailedException; +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.commons.collections.CollectionUtils; +import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; +import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore; +import org.apache.jackrabbit.oak.segment.SegmentNodeStore; +import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.file.FileStore; +import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; +import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; +import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; +import org.apache.jackrabbit.oak.segment.spi.persistence.split.SplitPersistence; +import org.apache.jackrabbit.oak.spi.blob.BlobStore; +import org.apache.jackrabbit.oak.spi.commit.CommitInfo; +import org.apache.jackrabbit.oak.spi.commit.EmptyHook; +import org.apache.jackrabbit.oak.spi.state.NodeBuilder; +import org.apache.jackrabbit.oak.spi.state.NodeStore; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.junit.Assert.assertEquals; + +public class SplitPersistenceBlobV8Test { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + @Rule + public TemporaryFolder folder = new TemporaryFolder(new File("target")); + + private SegmentNodeStore base; + + private SegmentNodeStore split; + + private FileStore baseFileStore; + + private FileStore splitFileStore; + + private String baseBlobId; + + private SegmentNodeStorePersistence splitPersistence; + + @Before + public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, StorageException { + SegmentNodeStorePersistence sharedPersistence = + new AzurePersistenceV8(azurite.getContainer("oak-test").getDirectoryReference("oak")); + File dataStoreDir = new File(folder.getRoot(), "blobstore"); + BlobStore blobStore = newBlobStore(dataStoreDir); + + baseFileStore = FileStoreBuilder + .fileStoreBuilder(folder.newFolder()) + .withCustomPersistence(sharedPersistence) + .withBlobStore(blobStore) + .build(); + base = SegmentNodeStoreBuilders.builder(baseFileStore).build(); + + NodeBuilder builder = base.getRoot().builder(); + builder.child("foo").child("bar").setProperty("version", "v1"); + base.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + + baseBlobId = createLoad(base, baseFileStore).getContentIdentity(); + baseFileStore.flush(); + baseFileStore.close(); + + baseFileStore = FileStoreBuilder + .fileStoreBuilder(folder.newFolder()) + .withCustomPersistence(sharedPersistence) + .withBlobStore(blobStore) + .build(); + base = SegmentNodeStoreBuilders.builder(baseFileStore).build(); + + 
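// Write one more blob to the shared base store before the split persistence is layered on top +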
createLoad(base, baseFileStore).getContentIdentity(); + baseFileStore.flush(); + + SegmentNodeStorePersistence localPersistence = new TarPersistence(folder.newFolder()); + splitPersistence = new SplitPersistence(sharedPersistence, localPersistence); + + splitFileStore = FileStoreBuilder + .fileStoreBuilder(folder.newFolder()) + .withCustomPersistence(splitPersistence) + .withBlobStore(blobStore) + .build(); + split = SegmentNodeStoreBuilders.builder(splitFileStore).build(); + } + + @After + public void tearDown() { + baseFileStore.close(); + } + + @Test + public void collectReferences() + throws IOException, CommitFailedException { + String blobId = createLoad(split, splitFileStore).getContentIdentity(); + + assertReferences(2, CollectionUtils.toSet(baseBlobId, blobId)); + } + + private static Blob createBlob(NodeStore nodeStore, int size) throws IOException { + byte[] data = new byte[size]; + new Random().nextBytes(data); + return nodeStore.createBlob(new ByteArrayInputStream(data)); + } + + private static BlobStore newBlobStore(File directory) { + OakFileDataStore delegate = new OakFileDataStore(); + delegate.setPath(directory.getAbsolutePath()); + delegate.init(null); + return new DataStoreBlobStore(delegate); + } + + private Blob createLoad(SegmentNodeStore store, FileStore fileStore) + throws IOException, CommitFailedException { + NodeBuilder builder = store.getRoot().builder(); + Blob blob = createBlob(store, 18000); + builder.setProperty("bin", blob); + store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fileStore.flush(); + return blob; + } + + private void assertReferences(int count, Set<String> blobIds) + throws IOException { + Set<String> actualReferences = new HashSet<>(); + splitFileStore.collectBlobReferences(actualReferences::add); + assertEquals("Number of visible blob references should match", count, actualReferences.size()); + assertEquals("Returned binary references should match the expected blob ids", blobIds, actualReferences); + } +} diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceV8Test.java new file mode 100644 index 00000000000..a5af0abb47b --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/v8/SplitPersistenceV8Test.java @@ -0,0 +1,144 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.segment.spi.persistence.split.v8; + +import com.microsoft.azure.storage.StorageException; +import org.apache.jackrabbit.oak.api.CommitFailedException; +import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; +import org.apache.jackrabbit.oak.segment.SegmentNodeStore; +import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.file.FileStore; +import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; +import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; +import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence; +import org.apache.jackrabbit.oak.segment.file.tar.binaries.BinaryReferencesIndexLoader; +import org.apache.jackrabbit.oak.segment.file.tar.binaries.InvalidBinaryReferencesIndexException; +import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitorAdapter; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveReader; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; +import org.apache.jackrabbit.oak.segment.spi.persistence.split.SplitPersistence; +import org.apache.jackrabbit.oak.spi.commit.CommitInfo; +import org.apache.jackrabbit.oak.spi.commit.EmptyHook; +import org.apache.jackrabbit.oak.spi.state.NodeBuilder; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.security.InvalidKeyException; + +import static org.junit.Assert.assertEquals; + +public class SplitPersistenceV8Test { + + @ClassRule + public static AzuriteDockerRule azurite = new AzuriteDockerRule(); + + @Rule + public TemporaryFolder folder = new TemporaryFolder(new File("target")); + + private SegmentNodeStore base; + + private SegmentNodeStore split; + + private FileStore baseFileStore; + + private FileStore splitFileStore; + + private SegmentNodeStorePersistence splitPersistence; + + @Before + public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, StorageException { + SegmentNodeStorePersistence sharedPersistence = new AzurePersistenceV8(azurite.getContainer("oak-test").getDirectoryReference("oak")); + + baseFileStore = FileStoreBuilder + .fileStoreBuilder(folder.newFolder()) + .withCustomPersistence(sharedPersistence) + .build(); + base = SegmentNodeStoreBuilders.builder(baseFileStore).build(); + + NodeBuilder builder = base.getRoot().builder(); + builder.child("foo").child("bar").setProperty("version", "v1"); + base.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + baseFileStore.flush(); + + SegmentNodeStorePersistence localPersistence = new TarPersistence(folder.newFolder()); + splitPersistence = new SplitPersistence(sharedPersistence, localPersistence); + + splitFileStore = FileStoreBuilder + .fileStoreBuilder(folder.newFolder()) + .withCustomPersistence(splitPersistence) + .build(); + split = SegmentNodeStoreBuilders.builder(splitFileStore).build(); + } + + @After + public void tearDown() { + 
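// Either store may already be closed and nulled by a test, e.g. testBinaryReferencesAreNotNull +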
if (splitFileStore != null) { + splitFileStore.close(); + } + + if (baseFileStore != null) { + baseFileStore.close(); + } + } + + @Test + public void testBaseNodeAvailable() { + assertEquals("v1", split.getRoot().getChildNode("foo").getChildNode("bar").getString("version")); + } + + @Test + public void testChangesAreLocalForBaseRepository() throws CommitFailedException { + NodeBuilder builder = base.getRoot().builder(); + builder.child("foo").child("bar").setProperty("version", "v2"); + base.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + + assertEquals("v1", split.getRoot().getChildNode("foo").getChildNode("bar").getString("version")); + } + + @Test + public void testChangesAreLocalForSplitRepository() throws CommitFailedException { + NodeBuilder builder = split.getRoot().builder(); + builder.child("foo").child("bar").setProperty("version", "v2"); + split.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + + assertEquals("v1", base.getRoot().getChildNode("foo").getChildNode("bar").getString("version")); + } + + @Test + public void testBinaryReferencesAreNotNull() throws IOException, InvalidBinaryReferencesIndexException { + splitFileStore.close(); + splitFileStore = null; + + SegmentArchiveManager manager = splitPersistence.createArchiveManager(true, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); + for (String archive : manager.listArchives()) { + SegmentArchiveReader reader = manager.open(archive); + BinaryReferencesIndexLoader.parseBinaryReferencesIndex(reader.getBinaryReferences()); + reader.close(); + } + } +} diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactory.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactory.java index ae299321978..44ef7a4eba9 100644 --- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactory.java +++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactory.java @@ -24,9 +24,9 @@ import org.apache.jackrabbit.guava.common.io.Closer; import org.apache.jackrabbit.guava.common.io.Files; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; -import org.apache.jackrabbit.oak.segment.azure.AzureUtilities; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.apache.jackrabbit.oak.segment.file.FileStore; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; @@ -56,7 +56,7 @@ public class SegmentAzureFactory implements NodeStoreFactory { private int segmentCacheSize; private final boolean readOnly; private static final Environment environment = new Environment(); - private AzureStorageCredentialManager azureStorageCredentialManager; + private AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8; public static class Builder { private final String dir; @@ -118,7 +118,7 @@ public SegmentAzureFactory(Builder builder) { @Override public NodeStore create(BlobStore blobStore, Closer closer) throws IOException { - AzurePersistence azPersistence = null; + AzurePersistenceV8 azPersistence = null; try { azPersistence = 
createAzurePersistence(closer); } catch (StorageException | URISyntaxException | InvalidKeyException e) { @@ -152,34 +152,34 @@ public NodeStore create(BlobStore blobStore, Closer closer) throws IOException { } } - private AzurePersistence createAzurePersistence(Closer closer) throws StorageException, URISyntaxException, InvalidKeyException { + private AzurePersistenceV8 createAzurePersistence(Closer closer) throws StorageException, URISyntaxException, InvalidKeyException { CloudBlobDirectory cloudBlobDirectory = null; // connection string will take precedence over accountkey / sas / service principal if (StringUtils.isNoneBlank(connectionString, containerName)) { - cloudBlobDirectory = AzureUtilities.cloudBlobDirectoryFrom(connectionString, containerName, dir); + cloudBlobDirectory = AzureUtilitiesV8.cloudBlobDirectoryFrom(connectionString, containerName, dir); } else if (StringUtils.isNoneBlank(accountName, uri)) { StorageCredentials credentials = null; if (StringUtils.isNotBlank(sasToken)) { credentials = new StorageCredentialsSharedAccessSignature(sasToken); } else { - this.azureStorageCredentialManager = new AzureStorageCredentialManager(); - credentials = azureStorageCredentialManager.getStorageCredentialsFromEnvironment(accountName, environment); - closer.register(azureStorageCredentialManager); + this.azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); + credentials = azureStorageCredentialManagerV8.getStorageCredentialsFromEnvironment(accountName, environment); + closer.register(azureStorageCredentialManagerV8); } - cloudBlobDirectory = AzureUtilities.cloudBlobDirectoryFrom(credentials, uri, dir); + cloudBlobDirectory = AzureUtilitiesV8.cloudBlobDirectoryFrom(credentials, uri, dir); } if (cloudBlobDirectory == null) { throw new IllegalArgumentException("Could not connect to Azure storage. 
Too few connection parameters specified!"); } - return new AzurePersistence(cloudBlobDirectory); + return new AzurePersistenceV8(cloudBlobDirectory); } @Override public boolean hasExternalBlobReferences() throws IOException { - AzurePersistence azPersistence = null; + AzurePersistenceV8 azPersistence = null; Closer closer = Closer.create(); CliUtils.handleSigInt(closer); try { diff --git a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentTarToSegmentAzureServicePrincipalTest.java b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentTarToSegmentAzureServicePrincipalTest.java index 1d42f0ee296..db8e92de54b 100644 --- a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentTarToSegmentAzureServicePrincipalTest.java +++ b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/SegmentTarToSegmentAzureServicePrincipalTest.java @@ -16,7 +16,7 @@ */ package org.apache.jackrabbit.oak.upgrade.cli; -import org.apache.jackrabbit.oak.segment.azure.AzureUtilities; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.apache.jackrabbit.oak.upgrade.cli.container.NodeStoreContainer; import org.apache.jackrabbit.oak.upgrade.cli.container.SegmentAzureServicePrincipalNodeStoreContainer; @@ -34,8 +34,8 @@ public class SegmentTarToSegmentAzureServicePrincipalTest extends AbstractOak2Oa @Override public void prepare() throws Exception { - assumeNotNull(ENVIRONMENT.getVariable(AzureUtilities.AZURE_ACCOUNT_NAME), ENVIRONMENT.getVariable(AzureUtilities.AZURE_TENANT_ID), - ENVIRONMENT.getVariable(AzureUtilities.AZURE_CLIENT_ID), ENVIRONMENT.getVariable(AzureUtilities.AZURE_CLIENT_SECRET)); + assumeNotNull(ENVIRONMENT.getVariable(AzureUtilitiesV8.AZURE_ACCOUNT_NAME), ENVIRONMENT.getVariable(AzureUtilitiesV8.AZURE_TENANT_ID), + ENVIRONMENT.getVariable(AzureUtilitiesV8.AZURE_CLIENT_ID), ENVIRONMENT.getVariable(AzureUtilitiesV8.AZURE_CLIENT_SECRET)); skipTest = false; super.prepare(); } diff --git a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/container/SegmentAzureNodeStoreContainer.java b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/container/SegmentAzureNodeStoreContainer.java index 5b99de8ad07..c7b69ec69db 100644 --- a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/container/SegmentAzureNodeStoreContainer.java +++ b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/container/SegmentAzureNodeStoreContainer.java @@ -23,8 +23,8 @@ import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; -import org.apache.jackrabbit.oak.segment.azure.AzureUtilities; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; import org.apache.jackrabbit.oak.segment.file.FileStore; import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; @@ -76,9 +76,9 @@ private SegmentAzureNodeStoreContainer(AzuriteDockerRule azurite, BlobStoreConta @Override public NodeStore open() throws IOException { - AzurePersistence azPersistence = null; + AzurePersistenceV8 azPersistence = null; try { - azPersistence = new AzurePersistence(container.getDirectoryReference(dir)); + azPersistence = new 
AzurePersistenceV8(container.getDirectoryReference(dir)); } catch (URISyntaxException e) { throw new IllegalStateException(e); } @@ -113,7 +113,7 @@ public void close() { @Override public void clean() throws IOException { try { - AzureUtilities.deleteAllEntries(container.getDirectoryReference(dir)); + AzureUtilitiesV8.deleteAllEntries(container.getDirectoryReference(dir)); } catch (URISyntaxException e) { throw new IOException(e); } diff --git a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/container/SegmentAzureServicePrincipalNodeStoreContainer.java b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/container/SegmentAzureServicePrincipalNodeStoreContainer.java index c0619c60419..5a82f6ef9c1 100644 --- a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/container/SegmentAzureServicePrincipalNodeStoreContainer.java +++ b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/container/SegmentAzureServicePrincipalNodeStoreContainer.java @@ -19,9 +19,9 @@ import com.microsoft.azure.storage.blob.CloudBlobDirectory; import org.apache.jackrabbit.guava.common.io.Files; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; -import org.apache.jackrabbit.oak.segment.azure.AzurePersistence; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; -import org.apache.jackrabbit.oak.segment.azure.AzureUtilities; +import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.apache.jackrabbit.oak.segment.file.FileStore; @@ -43,8 +43,8 @@ public class SegmentAzureServicePrincipalNodeStoreContainer implements NodeStore private final BlobStore blobStore; private FileStore fs; private File tmpDir; - private AzurePersistence azurePersistence; - private final AzureStorageCredentialManager azureStorageCredentialManager; + private AzurePersistenceV8 azurePersistenceV8; + private final AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8; public SegmentAzureServicePrincipalNodeStoreContainer() { this(null); @@ -52,21 +52,21 @@ public SegmentAzureServicePrincipalNodeStoreContainer() { public SegmentAzureServicePrincipalNodeStoreContainer(BlobStore blobStore) { this.blobStore = blobStore; - this.azureStorageCredentialManager = new AzureStorageCredentialManager(); + this.azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8(); } @Override public NodeStore open() throws IOException { try { - azurePersistence = createAzurePersistence(); + azurePersistenceV8 = createAzurePersistence(); } catch (Exception e) { throw new IllegalStateException(e); } tmpDir = Files.createTempDir(); FileStoreBuilder builder = FileStoreBuilder.fileStoreBuilder(tmpDir) - .withCustomPersistence(azurePersistence).withMemoryMapping(false); + .withCustomPersistence(azurePersistenceV8).withMemoryMapping(false); if (blobStore != null) { builder.withBlobStore(blobStore); } @@ -80,14 +80,14 @@ public NodeStore open() throws IOException { return new FileStoreUtils.NodeStoreWithFileStore(SegmentNodeStoreBuilders.builder(fs).build(), fs); } - private AzurePersistence createAzurePersistence() { - if (azurePersistence != null) { - return azurePersistence; + private AzurePersistenceV8 createAzurePersistence() { + if (azurePersistenceV8 != null) 
{ + return azurePersistenceV8; } - String path = String.format(AZURE_SEGMENT_STORE_PATH, ENVIRONMENT.getVariable(AzureUtilities.AZURE_ACCOUNT_NAME), + String path = String.format(AZURE_SEGMENT_STORE_PATH, ENVIRONMENT.getVariable(AzureUtilitiesV8.AZURE_ACCOUNT_NAME), CONTAINER_NAME, DIR); - CloudBlobDirectory cloudBlobDirectory = ToolUtils.createCloudBlobDirectory(path, ENVIRONMENT, azureStorageCredentialManager); - return new AzurePersistence(cloudBlobDirectory); + CloudBlobDirectory cloudBlobDirectory = ToolUtils.createCloudBlobDirectory(path, ENVIRONMENT, azureStorageCredentialManagerV8); + return new AzurePersistenceV8(cloudBlobDirectory); } @Override @@ -99,16 +99,16 @@ public void close() { if (tmpDir != null) { tmpDir.delete(); } - if (azureStorageCredentialManager != null) { - azureStorageCredentialManager.close(); + if (azureStorageCredentialManagerV8 != null) { + azureStorageCredentialManagerV8.close(); } } @Override public void clean() throws IOException { - AzurePersistence azurePersistence = createAzurePersistence(); + AzurePersistenceV8 azurePersistenceV8 = createAzurePersistence(); try { - AzureUtilities.deleteAllBlobs(azurePersistence.getSegmentstoreDirectory()); + AzureUtilitiesV8.deleteAllBlobs(azurePersistenceV8.getSegmentstoreDirectory()); } catch (Exception e) { throw new IOException(e); } @@ -116,7 +116,7 @@ public void clean() throws IOException { @Override public String getDescription() { - return "az:" + String.format(AZURE_SEGMENT_STORE_PATH, ENVIRONMENT.getVariable(AzureUtilities.AZURE_ACCOUNT_NAME), + return "az:" + String.format(AZURE_SEGMENT_STORE_PATH, ENVIRONMENT.getVariable(AzureUtilitiesV8.AZURE_ACCOUNT_NAME), CONTAINER_NAME, DIR); } } \ No newline at end of file diff --git a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactoryTest.java b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactoryTest.java index 00e10ddfe5f..691411c011d 100644 --- a/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactoryTest.java +++ b/oak-upgrade/src/test/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentAzureFactoryTest.java @@ -25,8 +25,8 @@ import org.apache.commons.lang3.StringUtils; import org.apache.jackrabbit.guava.common.io.Closer; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; -import org.apache.jackrabbit.oak.segment.azure.AzureStorageCredentialManager; -import org.apache.jackrabbit.oak.segment.azure.AzureUtilities; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureStorageCredentialManagerV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8; import org.apache.jackrabbit.oak.segment.azure.tool.ToolUtils; import org.apache.jackrabbit.oak.segment.azure.util.Environment; import org.apache.jackrabbit.oak.upgrade.cli.CliUtils; @@ -40,11 +40,11 @@ import java.util.Date; import java.util.EnumSet; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_ACCOUNT_NAME; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_ID; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_CLIENT_SECRET; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_SECRET_KEY; -import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.AZURE_TENANT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_ACCOUNT_NAME; +import static 
org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_ID; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_CLIENT_SECRET; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_SECRET_KEY; +import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.AZURE_TENANT_ID; import static org.junit.Assert.assertEquals; import static org.junit.Assume.assumeNotNull; import static org.junit.Assume.assumeTrue; @@ -109,7 +109,7 @@ public void testConnectionWithUri_accessKey() throws IOException { String uri = String.format(CONNECTION_URI, ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME), CONTAINER_NAME); Closer closer = Closer.create(); - try (AzureStorageCredentialManager azureStorageCredentialManager = new AzureStorageCredentialManager()) { + try (AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8()) { try { SegmentAzureFactory segmentAzureFactory = new SegmentAzureFactory.Builder(DIR, 256, false) @@ -122,7 +122,7 @@ public void testConnectionWithUri_accessKey() throws IOException { assertEquals(1, nodeStore.getFileStore().getSegmentCount()); } finally { closer.close(); - cleanup(uri, azureStorageCredentialManager); + cleanup(uri, azureStorageCredentialManagerV8); } } } @@ -136,7 +136,7 @@ public void testConnectionWithUri_servicePrincipal() throws IOException, Interru String uri = String.format(CONNECTION_URI, ENVIRONMENT.getVariable(AZURE_ACCOUNT_NAME), CONTAINER_NAME); Closer closer = Closer.create(); - try (AzureStorageCredentialManager azureStorageCredentialManager = new AzureStorageCredentialManager()) { + try (AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8 = new AzureStorageCredentialManagerV8()) { try { SegmentAzureFactory segmentAzureFactory = new SegmentAzureFactory.Builder(DIR, 256, false) @@ -149,16 +149,16 @@ public void testConnectionWithUri_servicePrincipal() throws IOException, Interru assertEquals(1, nodeStore.getFileStore().getSegmentCount()); } finally { closer.close(); - cleanup(uri, azureStorageCredentialManager); + cleanup(uri, azureStorageCredentialManagerV8); } } } - private void cleanup(String uri, AzureStorageCredentialManager azureStorageCredentialManager) { + private void cleanup(String uri, AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8) { uri = uri + "/" + DIR; try { - CloudBlobDirectory cloudBlobDirectory = ToolUtils.createCloudBlobDirectory(uri, ENVIRONMENT, azureStorageCredentialManager); - AzureUtilities.deleteAllBlobs(cloudBlobDirectory); + CloudBlobDirectory cloudBlobDirectory = ToolUtils.createCloudBlobDirectory(uri, ENVIRONMENT, azureStorageCredentialManagerV8); + AzureUtilitiesV8.deleteAllBlobs(cloudBlobDirectory); } catch (Exception e) { throw new IllegalStateException(e); } From 75edd07b89039af8890b9911352740a33394d415 Mon Sep 17 00:00:00 2001 From: ierandra Date: Mon, 30 Sep 2024 22:04:56 +0800 Subject: [PATCH 02/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - configure package versions --- .../oak/segment/azure/package-info.java | 2 +- .../oak/segment/azure/util/package-info.java | 22 +++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/package-info.java diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/package-info.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/package-info.java index 
954129d77c7..5fc321f6f9d 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/package-info.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/package-info.java @@ -15,7 +15,7 @@ * limitations under the License. */ @Internal(since = "1.0.0") -@Version("3.0.0") +@Version("4.0.0") package org.apache.jackrabbit.oak.segment.azure; import org.apache.jackrabbit.oak.commons.annotations.Internal; diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/package-info.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/package-info.java new file mode 100644 index 00000000000..792686fe8b9 --- /dev/null +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +@Internal(since = "1.0.0") +@Version("2.0.0") +package org.apache.jackrabbit.oak.segment.azure.util; + +import org.apache.jackrabbit.oak.commons.annotations.Internal; +import org.osgi.annotation.versioning.Version; From 276833046f08cfab3606447530fb5dc3e647c1ba Mon Sep 17 00:00:00 2001 From: ierandra Date: Tue, 1 Oct 2024 19:33:01 +0800 Subject: [PATCH 03/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - PR review fixes --- oak-segment-azure/pom.xml | 6 ++ .../oak/segment/azure/AzureJournalFile.java | 9 +- .../oak/segment/azure/AzurePersistence.java | 7 +- .../oak/segment/azure/AzureUtilities.java | 10 ++ .../azure/AzureArchiveManagerTest.java | 12 ++- .../AzureHttpRequestLoggingPolicyTest.java | 100 ++++++++++++++++++ .../azure/AzureSegmentStoreServiceTest.java | 5 +- .../split/SplitPersistenceBlobTest.java | 6 +- .../split/SplitPersistenceTest.java | 6 +- 9 files changed, 152 insertions(+), 9 deletions(-) create mode 100644 oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicyTest.java diff --git a/oak-segment-azure/pom.xml b/oak-segment-azure/pom.xml index 72522ea34f2..0b3df23094c 100644 --- a/oak-segment-azure/pom.xml +++ b/oak-segment-azure/pom.xml @@ -399,6 +399,12 @@ <artifactId>junit</artifactId> <scope>test</scope> </dependency> + <dependency> + <groupId>io.projectreactor</groupId> + <artifactId>reactor-test</artifactId> + <version>3.6.10</version> + <scope>test</scope> + </dependency> <dependency> <groupId>org.testcontainers</groupId> <artifactId>testcontainers</artifactId> diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFile.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFile.java index b4da690b625..b7f63895630 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFile.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureJournalFile.java @@ -33,7 +33,11 @@ import java.io.ByteArrayInputStream; import java.io.IOException; -import 
java.util.*; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.HashMap; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -142,8 +146,7 @@ public String readLine() throws IOException { } reader = new ReverseFileReader(blobContainerClient, blob); if (firstLineReturned) { - while ("".equals(reader.readLine())) - ; // the first line was already returned, let's fast-forward it + while ("".equals(reader.readLine())); // the first line was already returned, let's fast-forward it } } catch (BlobStorageException e) { throw new IOException(e); diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistence.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistence.java index 2bb687a12da..0768875fdc6 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistence.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistence.java @@ -27,7 +27,12 @@ import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitor; import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitor; -import org.apache.jackrabbit.oak.segment.spi.persistence.*; +import org.apache.jackrabbit.oak.segment.spi.persistence.GCJournalFile; +import org.apache.jackrabbit.oak.segment.spi.persistence.JournalFile; +import org.apache.jackrabbit.oak.segment.spi.persistence.ManifestFile; +import org.apache.jackrabbit.oak.segment.spi.persistence.RepositoryLock; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager; +import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java index 1aafe96ed5c..9c4525abb26 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java @@ -80,6 +80,16 @@ public static void readBufferFully(BlockBlobClient blob, Buffer buffer) throws I } } + public static void deleteAllEntries(BlobContainerClient blobContainerClient, ListBlobsOptions listBlobsOptions) { + getBlobs(blobContainerClient, listBlobsOptions).forEach(b -> { + try { + blobContainerClient.getBlobClient(b.getName()).deleteIfExists(); + } catch (BlobStorageException e) { + log.error("Can't delete blob {}", b.getName(), e); + } + }); + } + private static class ByteBufferOutputStream extends OutputStream { @NotNull diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java index ab9a1d3b66d..62fe73d4cb0 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java @@ -52,7 +52,11 @@ import org.apache.jackrabbit.oak.spi.commit.CommitInfo; import org.apache.jackrabbit.oak.spi.commit.EmptyHook; import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import 
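A note on the new deleteAllEntries helper above: it pairs a flat listing with a per-blob deleteIfExists call and logs individual failures instead of rethrowing them, so one undeletable blob does not abort the sweep. A minimal usage sketch against the v12 SDK follows; the container name, environment variable and prefix are illustrative assumptions, not taken from this patch:

    import com.azure.storage.blob.BlobContainerClient;
    import com.azure.storage.blob.BlobServiceClientBuilder;
    import com.azure.storage.blob.models.ListBlobsOptions;
    import org.apache.jackrabbit.oak.segment.azure.AzureUtilities;

    public class DeleteArchiveSketch {
        public static void main(String[] args) {
            // Build a container client from a standard connection string (assumed to be in the environment).
            BlobContainerClient container = new BlobServiceClientBuilder()
                    .connectionString(System.getenv("AZURE_CONNECTION_STRING"))
                    .buildClient()
                    .getBlobContainerClient("oak-test");
            // Delete everything under a single archive's prefix; per-blob failures are only logged by the helper.
            AzureUtilities.deleteAllEntries(container, new ListBlobsOptions().setPrefix("oak/data00000a.tar"));
        }
    }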
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java index ab9a1d3b66d..62fe73d4cb0 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java
@@ -52,7 +52,11 @@ import org.apache.jackrabbit.oak.spi.commit.CommitInfo; import org.apache.jackrabbit.oak.spi.commit.EmptyHook; import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.junit.*; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.Assert; import org.junit.contrib.java.lang.system.ProvideSystemProperty; import org.junit.rules.TemporaryFolder; import org.mockito.Mockito;
@@ -62,7 +66,11 @@ import java.io.IOException; import java.net.URISyntaxException; import java.security.InvalidKeyException; -import java.util.*; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.UUID; import java.util.concurrent.TimeoutException; import static org.apache.jackrabbit.guava.common.collect.Lists.newArrayList;
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicyTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicyTest.java new file mode 100644 index 00000000000..fb09ce2f81e --- /dev/null +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicyTest.java
@@ -0,0 +1,100 @@ +package org.apache.jackrabbit.oak.segment.azure; + +import com.azure.core.http.HttpPipelineCallContext; +import com.azure.core.http.HttpPipelineNextPolicy; +import com.azure.core.http.HttpRequest; +import com.azure.core.http.HttpResponse; +import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitor; +import org.junit.Before; +import org.junit.Test; +import reactor.core.publisher.Mono; +import reactor.test.StepVerifier; + +import java.util.concurrent.TimeUnit; + +import static org.mockito.Mockito.*; + +public class AzureHttpRequestLoggingPolicyTest { + + private AzureHttpRequestLoggingPolicy loggingPolicy; + private HttpPipelineCallContext mockContext; + private HttpPipelineNextPolicy mockNextPolicy; + private HttpResponse mockHttpResponse; + private RemoteStoreMonitor mockRemoteStoreMonitor; + + @Before + public void setup() { + loggingPolicy = new AzureHttpRequestLoggingPolicy(); + mockContext = mock(HttpPipelineCallContext.class); + mockNextPolicy = mock(HttpPipelineNextPolicy.class); + mockHttpResponse = mock(HttpResponse.class); + mockRemoteStoreMonitor = mock(RemoteStoreMonitor.class); + } + + + @Test + public void testRemoteStoreMonitorTracksMetrics() { + // Attach the remote store monitor + loggingPolicy.setRemoteStoreMonitor(mockRemoteStoreMonitor); + + // Setup mock behavior + HttpRequest mockHttpRequest = mock(HttpRequest.class); + when(mockContext.getHttpRequest()).thenReturn(mockHttpRequest); + when(mockNextPolicy.process()).thenReturn(Mono.just(mockHttpResponse)); + when(mockHttpResponse.getStatusCode()).thenReturn(200); + + // Run the process method + Mono<HttpResponse> result = loggingPolicy.process(mockContext, mockNextPolicy); + + // Verify the result + StepVerifier.create(result) + .expectNext(mockHttpResponse) + .verifyComplete(); + + // Verify that the monitor recorded the metrics + verify(mockRemoteStoreMonitor, times(1)).requestDuration(anyLong(), eq(TimeUnit.NANOSECONDS)); + verify(mockRemoteStoreMonitor, times(1)).requestCount(); + verify(mockRemoteStoreMonitor, never()).requestError(); + } + + @Test + public void testErrorStatusCodeTriggersErrorCount() { + loggingPolicy.setRemoteStoreMonitor(mockRemoteStoreMonitor); + + // Setup mock behavior + HttpRequest mockHttpRequest = mock(HttpRequest.class); + when(mockContext.getHttpRequest()).thenReturn(mockHttpRequest); + when(mockNextPolicy.process()).thenReturn(Mono.just(mockHttpResponse)); + when(mockHttpResponse.getStatusCode()).thenReturn(500); // Error status code + + // Run the process method + Mono<HttpResponse> result = loggingPolicy.process(mockContext, mockNextPolicy); + + // Verify the result + StepVerifier.create(result) + .expectNext(mockHttpResponse) + .verifyComplete(); + + // Verify that error count was recorded + verify(mockRemoteStoreMonitor, times(1)).requestDuration(anyLong(), eq(TimeUnit.NANOSECONDS)); + verify(mockRemoteStoreMonitor, times(1)).requestError(); + verify(mockRemoteStoreMonitor, never()).requestCount(); + } + + @Test + public void testNoRemoteStoreMonitor() { + // Setup: No remoteStoreMonitor is attached + when(mockNextPolicy.process()).thenReturn(Mono.just(mockHttpResponse)); + when(mockHttpResponse.getStatusCode()).thenReturn(200); + + // Run the process method + Mono<HttpResponse> result = loggingPolicy.process(mockContext, mockNextPolicy); + + // Verify that the result is correct and that no interactions with the monitor occurred + StepVerifier.create(result) + .expectNext(mockHttpResponse) + .verifyComplete(); + + verifyNoInteractions(mockRemoteStoreMonitor); + } +}
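For orientation, the policy exercised by the new test above is attached when the blob service client is built. A minimal wiring sketch follows; the no-op monitor and the connection-string variable are placeholder assumptions, not taken from this patch:

    import com.azure.storage.blob.BlobServiceClient;
    import com.azure.storage.blob.BlobServiceClientBuilder;
    import java.util.concurrent.TimeUnit;
    import org.apache.jackrabbit.oak.segment.azure.AzureHttpRequestLoggingPolicy;
    import org.apache.jackrabbit.oak.segment.spi.monitor.RemoteStoreMonitor;

    public class LoggingPolicyWiringSketch {
        public static void main(String[] args) {
            AzureHttpRequestLoggingPolicy loggingPolicy = new AzureHttpRequestLoggingPolicy();
            // Optional: route request count/duration/error callbacks to a RemoteStoreMonitor (no-op here).
            loggingPolicy.setRemoteStoreMonitor(new RemoteStoreMonitor() {
                @Override public void requestCount() { }
                @Override public void requestError() { }
                @Override public void requestDuration(long duration, TimeUnit timeUnit) { }
            });
            // Every request/response pair flowing through this client's pipeline now passes the policy.
            BlobServiceClient client = new BlobServiceClientBuilder()
                    .connectionString(System.getenv("AZURE_CONNECTION_STRING"))
                    .addPolicy(loggingPolicy)
                    .buildClient();
        }
    }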
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreServiceTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreServiceTest.java index 331882cb0dd..1a64a938c5b 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreServiceTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreServiceTest.java
@@ -43,7 +43,10 @@ import static java.util.stream.Collectors.toSet; import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.*; -import static org.junit.Assert.*; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; import static org.junit.Assume.assumeNotNull; public class AzureSegmentStoreServiceTest {
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java index 151b825b996..1397bfcb8c8 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java
@@ -37,7 +37,11 @@ import org.apache.jackrabbit.oak.spi.commit.EmptyHook; import org.apache.jackrabbit.oak.spi.state.NodeBuilder; import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.*; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; import org.junit.rules.TemporaryFolder; import java.io.ByteArrayInputStream;
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java index d6c01da7bd0..6b60bc5f7c7 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java @@ -38,7 +38,11 @@ import
org.apache.jackrabbit.oak.spi.commit.CommitInfo; import org.apache.jackrabbit.oak.spi.commit.EmptyHook; import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.junit.*; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; import org.junit.rules.TemporaryFolder; import java.io.File; From f98b20b56cf2069bca17940dfc4cf4be30a42132 Mon Sep 17 00:00:00 2001 From: ierandra Date: Wed, 2 Oct 2024 18:02:15 +0800 Subject: [PATCH 04/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - fix tests --- .../azure/util/AzureRequestOptions.java | 4 +- .../azure/AzureSegmentArchiveWriterTest.java | 80 ++++++++++++++----- .../oak/segment/azure/AzuriteDockerRule.java | 2 +- 3 files changed, 62 insertions(+), 24 deletions(-) diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java index d5ed8394183..7ddb8067c9c 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java @@ -49,7 +49,7 @@ public static RequestRetryOptions getRetryOptionsDefault() { } public static RequestRetryOptions getRetryOptionsDefault(String secondaryHost) { - int retryAttempts = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS); + int retryAttempts = Integer.getInteger(RETRY_BACKOFF_PROP, DEFAULT_RETRY_BACKOFF_SECONDS); int timeoutExecution = Integer.getInteger(TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION); int timeoutInterval = Integer.getInteger(TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL); long timeoutIntervalToMs = timeoutInterval * 1_000L; @@ -68,7 +68,7 @@ public static RequestRetryOptions getRetryOptionsDefault(String secondaryHost) { * @return */ public static RequestRetryOptions getRetryOperationsOptimiseForWriteOperations() { - int retryAttempts = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS); + int retryAttempts = Integer.getInteger(RETRY_BACKOFF_PROP, DEFAULT_RETRY_BACKOFF_SECONDS); Integer writeTimeoutExecution = Integer.getInteger(WRITE_TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION); Integer writeTimeoutInterval = Integer.getInteger(WRITE_TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL); long writeTimeoutIntervalToMs = writeTimeoutInterval * 1_000L; diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java index f7114c36538..9d5897eb0f7 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java @@ -19,6 +19,10 @@ package org.apache.jackrabbit.oak.segment.azure; import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobServiceClient; +import com.azure.storage.blob.BlobServiceClientBuilder; +import com.azure.storage.common.policy.RequestRetryOptions; +import org.apache.jackrabbit.oak.segment.azure.util.AzureRequestOptions; import org.apache.jackrabbit.oak.segment.remote.WriteAccessController; import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter; import 
org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter; @@ -39,6 +43,8 @@ import java.io.IOException; import java.util.UUID; +import static org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule.ACCOUNT_KEY; +import static org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule.ACCOUNT_NAME; import static org.junit.Assert.assertThrows; import static org.mockserver.model.HttpRequest.request; import static org.mockserver.model.HttpResponse.response; @@ -49,7 +55,7 @@ public class AzureSegmentArchiveWriterTest { public static final int MAX_ATTEMPTS = 3; @Rule - public MockServerRule mockServerRule = new MockServerRule(this); + public MockServerRule mockServerRule = new MockServerRule(this, 1080); @SuppressWarnings("unused") private MockServerClient mockServerClient; @@ -57,26 +63,20 @@ public class AzureSegmentArchiveWriterTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private BlobContainerClient readBlobContainerClient; - private BlobContainerClient writeBlobContainerClient; - @Before public void setUp() throws Exception { + mockServerClient = new MockServerClient("localhost", 1080); System.setProperty("azure.segment.archive.writer.retries.intervalMs", "100"); System.setProperty("azure.segment.archive.writer.retries.max", Integer.toString(MAX_ATTEMPTS)); // Disable Azure SDK own retry mechanism used by AzureSegmentArchiveWriter - System.setProperty("segment.azure.retry.attempts", "1"); + System.setProperty("segment.azure.retry.backoff", "1"); System.setProperty("segment.timeout.execution", "1"); - - readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); - writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); } @Test public void retryWhenFailureOnWriteBinaryReferences_eventuallySucceed() throws Exception { - SegmentArchiveWriter writer = createSegmentArchiveWriter(); - writeAndFlushSegment(writer); + expectWriteRequests(); HttpRequest writeBinaryReferencesRequest = getWriteBinaryReferencesRequest(); // fail twice @@ -88,6 +88,9 @@ public void retryWhenFailureOnWriteBinaryReferences_eventuallySucceed() throws E .when(writeBinaryReferencesRequest, Times.once()) .respond(response().withStatusCode(201)); + SegmentArchiveWriter writer = createSegmentArchiveWriter(); + writeAndFlushSegment(writer); + writer.writeBinaryReferences(new byte[10]); mockServerClient.verify(writeBinaryReferencesRequest, exactly(MAX_ATTEMPTS)); @@ -95,8 +98,7 @@ public void retryWhenFailureOnWriteBinaryReferences_eventuallySucceed() throws E @Test public void retryWhenFailureOnWriteGraph_eventuallySucceed() throws Exception { - SegmentArchiveWriter writer = createSegmentArchiveWriter(); - writeAndFlushSegment(writer); + expectWriteRequests(); HttpRequest writeGraphRequest = getWriteGraphRequest(); // fail twice @@ -108,6 +110,9 @@ public void retryWhenFailureOnWriteGraph_eventuallySucceed() throws Exception { .when(writeGraphRequest, Times.once()) .respond(response().withStatusCode(201)); + SegmentArchiveWriter writer = createSegmentArchiveWriter(); + writeAndFlushSegment(writer); + writer.writeGraph(new byte[10]); mockServerClient.verify(writeGraphRequest, exactly(MAX_ATTEMPTS)); @@ -115,8 +120,7 @@ public void retryWhenFailureOnWriteGraph_eventuallySucceed() throws Exception { @Test public void retryWhenFailureOnClose_eventuallySucceed() throws Exception { - SegmentArchiveWriter writer = createSegmentArchiveWriter(); - writeAndFlushSegment(writer); + expectWriteRequests(); HttpRequest closeArchiveRequest = 
getCloseArchiveRequest(); // fail twice @@ -128,6 +132,9 @@ public void retryWhenFailureOnClose_eventuallySucceed() throws Exception { .when(closeArchiveRequest, Times.once()) .respond(response().withStatusCode(201)); + SegmentArchiveWriter writer = createSegmentArchiveWriter(); + writeAndFlushSegment(writer); + writer.close(); mockServerClient.verify(closeArchiveRequest, exactly(MAX_ATTEMPTS)); @@ -135,8 +142,7 @@ public void retryWhenFailureOnClose_eventuallySucceed() throws Exception { @Test public void retryWhenFailureOnClose_failAfterLastRetryAttempt() throws Exception { - SegmentArchiveWriter writer = createSegmentArchiveWriter(); - writeAndFlushSegment(writer); + expectWriteRequests(); HttpRequest closeArchiveRequest = getCloseArchiveRequest(); // always fail @@ -144,6 +150,8 @@ public void retryWhenFailureOnClose_failAfterLastRetryAttempt() throws Exception .when(closeArchiveRequest, Times.unlimited()) .respond(response().withStatusCode(500)); + SegmentArchiveWriter writer = createSegmentArchiveWriter(); + writeAndFlushSegment(writer); assertThrows(IOException.class, writer::close); @@ -152,7 +160,6 @@ public void retryWhenFailureOnClose_failAfterLastRetryAttempt() throws Exception private void writeAndFlushSegment(SegmentArchiveWriter writer) throws IOException { - expectWriteRequests(); UUID u = UUID.randomUUID(); writer.writeSegment(u.getMostSignificantBits(), u.getLeastSignificantBits(), new byte[10], 0, 10, 0, 0, false); writer.flush(); @@ -170,6 +177,12 @@ private void expectWriteRequests() { @NotNull private SegmentArchiveWriter createSegmentArchiveWriter() throws IOException { + createContainerMock(); + BlobContainerClient readBlobContainerClient = getCloudStorageAccount("oak-test", AzureRequestOptions.getRetryOptionsDefault()); + BlobContainerClient writeBlobContainerClient = getCloudStorageAccount("oak-test", AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations()); + writeBlobContainerClient.deleteIfExists(); + writeBlobContainerClient.createIfNotExists(); + WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); AzurePersistence azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak");/**/ @@ -182,7 +195,7 @@ private SegmentArchiveWriter createSegmentArchiveWriter() throws IOException { private static HttpRequest getCloseArchiveRequest() { return request() .withMethod("PUT") - .withPath(BASE_PATH + "/oak/data00000a.tar/closed"); + .withPath(BASE_PATH + "/oak%2Fdata00000a.tar%2Fclosed"); } private static HttpRequest getWriteBinaryReferencesRequest() { @@ -194,21 +207,46 @@ private static HttpRequest getWriteBinaryReferencesRequest() { private static HttpRequest getWriteGraphRequest() { return request() .withMethod("PUT") - .withPath(BASE_PATH + "/oak/data00000a.tar/data00000a.tar.gph"); + .withPath(BASE_PATH + "/oak%2Fdata00000a.tar%2Fdata00000a.tar.gph"); } private static HttpRequest getUploadSegmentMetadataRequest() { return request() .withMethod("PUT") - .withPath(BASE_PATH + "/oak/data00000a.tar/.*") + .withPath(BASE_PATH + "/oak%2Fdata00000a.tar%2F.*") .withQueryStringParameter("comp", "metadata"); } private static HttpRequest getUploadSegmentDataRequest() { return request() .withMethod("PUT") - .withPath(BASE_PATH + "/oak/data00000a.tar/.*") + .withPath(BASE_PATH + "/oak%2Fdata00000a.tar%2F.*") .withBody(new BinaryBody(new byte[10])); } + private void createContainerMock() { + mockServerClient + .when(request() + .withMethod("PUT") + .withPath(BASE_PATH)) + 
.respond(response().withStatusCode(201).withBody("Container created successfully")); + } + + public BlobContainerClient getCloudStorageAccount(String containerName, RequestRetryOptions retryOptions) { + String blobEndpoint = "BlobEndpoint=http://localhost:1080/devstoreaccount1"; + String accountName = "AccountName=" + ACCOUNT_NAME; + String accountKey = "AccountKey=" + ACCOUNT_KEY; + + AzureHttpRequestLoggingTestingPolicy azureHttpRequestLoggingTestingPolicy = new AzureHttpRequestLoggingTestingPolicy(); + + BlobServiceClient blobServiceClient = new BlobServiceClientBuilder() + .endpoint(azurite.getBlobEndpoint()) + .addPolicy(azureHttpRequestLoggingTestingPolicy) + .connectionString(("DefaultEndpointsProtocol=http;" + accountName + ";" + accountKey + ";" + blobEndpoint)) + .retryOptions(retryOptions) + .buildClient(); + + return blobServiceClient.getBlobContainerClient(containerName); + } + } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java index c00de44aba4..d0651e0fa7a 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java @@ -122,7 +122,7 @@ public BlobContainerClient getCloudStorageAccount(String containerName, RequestR BlobServiceClient blobServiceClient = new BlobServiceClientBuilder() .endpoint(getBlobEndpoint()) .addPolicy(azureHttpRequestLoggingTestingPolicy) - .connectionString(("DefaultEndpointsProtocol=http;" + ";" + accountName + ";" + accountKey + ";" + blobEndpoint)) + .connectionString(("DefaultEndpointsProtocol=http;" + accountName + ";" + accountKey + ";" + blobEndpoint)) .retryOptions(retryOptions) .buildClient(); From 9718bbce5df6c7135e4ff1cbdc681a33aa4da4bc Mon Sep 17 00:00:00 2001 From: ierandra Date: Wed, 2 Oct 2024 18:10:35 +0800 Subject: [PATCH 05/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - fix tests --- .../oak/segment/azure/AzureSegmentArchiveWriterTest.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java index 9d5897eb0f7..015b60bdf93 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java @@ -55,7 +55,7 @@ public class AzureSegmentArchiveWriterTest { public static final int MAX_ATTEMPTS = 3; @Rule - public MockServerRule mockServerRule = new MockServerRule(this, 1080); + public MockServerRule mockServerRule = new MockServerRule(this, 1090); @SuppressWarnings("unused") private MockServerClient mockServerClient; @@ -65,7 +65,7 @@ public class AzureSegmentArchiveWriterTest { @Before public void setUp() throws Exception { - mockServerClient = new MockServerClient("localhost", 1080); + mockServerClient = new MockServerClient("localhost", mockServerRule.getPort()); System.setProperty("azure.segment.archive.writer.retries.intervalMs", "100"); System.setProperty("azure.segment.archive.writer.retries.max", Integer.toString(MAX_ATTEMPTS)); @@ -233,7 +233,7 @@ private void createContainerMock() { } public BlobContainerClient 
getCloudStorageAccount(String containerName, RequestRetryOptions retryOptions) { - String blobEndpoint = "BlobEndpoint=http://localhost:1080/devstoreaccount1"; + String blobEndpoint = "BlobEndpoint=http://localhost:" + mockServerRule.getPort() + "/devstoreaccount1"; String accountName = "AccountName=" + ACCOUNT_NAME; String accountKey = "AccountKey=" + ACCOUNT_KEY; From a125e1ea83ae97a852e39bcb06ff4c8c1b80f1c0 Mon Sep 17 00:00:00 2001 From: ierandra Date: Tue, 29 Oct 2024 17:06:45 +0200 Subject: [PATCH 06/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - remove debug logging --- .../apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java | 1 - 1 file changed, 1 deletion(-) diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java index 298ba40d0ae..384c3c2eb97 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManager.java @@ -186,7 +186,6 @@ public void copyFile(String from, String to) throws IOException { getBlobs(from) .forEach(blobItem -> { try { - log.info("Backup segment: {}", blobItem.getName()); copyBlob(blobItem, targetDirectory); } catch (IOException e) { log.error("Can't copy segment {}", blobItem.getName(), e); From 43df2542032db6c656cc5decab02875cab5f5219 Mon Sep 17 00:00:00 2001 From: ierandra Date: Tue, 29 Oct 2024 17:32:20 +0200 Subject: [PATCH 07/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - PR review fixes --- .../oak/segment/azure/v8/AzureArchiveManagerV8.java | 2 +- .../jackrabbit/oak/segment/azure/v8/AzureJournalFileV8.java | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java index a6e8d6821ab..d33c95b06f8 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureArchiveManagerV8.java @@ -50,7 +50,7 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; -import static org.apache.jackrabbit.guava.common.base.Preconditions.checkArgument; +import static org.apache.jackrabbit.oak.commons.conditions.Validate.checkArgument; import static org.apache.jackrabbit.oak.segment.azure.v8.AzureUtilitiesV8.getName; public class AzureArchiveManagerV8 implements SegmentArchiveManager { diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8.java index 00e92f520a7..0ee8914f5d4 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureJournalFileV8.java @@ -17,6 +17,7 @@ package org.apache.jackrabbit.oak.segment.azure.v8; import org.apache.jackrabbit.guava.common.collect.ImmutableList; +import org.apache.jackrabbit.oak.commons.collections.CollectionUtils; import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.blob.CloudAppendBlob; import 
com.microsoft.azure.storage.blob.CloudBlob; @@ -43,8 +44,6 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import static org.apache.jackrabbit.guava.common.collect.Lists.partition; - public class AzureJournalFileV8 implements JournalFile { private static final Logger log = LoggerFactory.getLogger(AzureJournalFileV8.class);
@@ -220,7 +219,7 @@ public void batchWriteLines(List<String> lines) throws IOException { } int firstBlockSize = Math.min(lineLimit - lineCount, lines.size()); List<String> firstBlock = lines.subList(0, firstBlockSize); - List<List<String>> remainingBlocks = partition(lines.subList(firstBlockSize, lines.size()), lineLimit); + List<List<String>> remainingBlocks = CollectionUtils.partitionList(lines.subList(firstBlockSize, lines.size()), lineLimit); List<List<String>> allBlocks = ImmutableList.<List<String>>builder() .addAll(firstBlock.isEmpty() ? ImmutableList.of() : ImmutableList.of(firstBlock)) .addAll(remainingBlocks)
From 045b4dde40fad48c5988fa8e5bf2fc14b7dcb2bd Mon Sep 17 00:00:00 2001 From: ierandra Date: Fri, 1 Nov 2024 16:14:01 +0200 Subject: [PATCH 08/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - fix rootPrefix and missing metadata
--- .../segment/azure/AzurePersistenceManager.java | 16 +++++++++------- .../oak/segment/azure/AzureUtilities.java | 7 ++----- 2 files changed, 11 insertions(+), 12 deletions(-)
diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java index e990e1e4867..0e0d836ada5 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java
@@ -29,10 +29,11 @@ public static AzurePersistence createAzurePersistenceFrom(@NotNull String accoun final String clientId = environment.getVariable(AZURE_CLIENT_ID); final String clientSecret = environment.getVariable(AZURE_CLIENT_SECRET); final String tenantId = environment.getVariable(AZURE_TENANT_ID); + final String rootPrefixNormalized = normalizePath(rootPrefix); if (StringUtils.isNoneBlank(clientId, clientSecret, tenantId)) { try { - return createPersistenceFromServicePrincipalCredentials(accountName, containerName, rootPrefix, clientId, clientSecret, tenantId, false, false); + return createPersistenceFromServicePrincipalCredentials(accountName, containerName, rootPrefixNormalized, clientId, clientSecret, tenantId, false, false); } catch (IllegalArgumentException | StringIndexOutOfBoundsException e) { log.error("Error occurred while connecting to Azure Storage using service principals: ", e); throw new IllegalArgumentException(
@@ -44,7 +45,7 @@ public static AzurePersistence createAzurePersistenceFrom(@NotNull String accoun String key = environment.getVariable(AZURE_SECRET_KEY); try { - return createPersistenceFromAccessKey(accountName, containerName, key, null, rootPrefix, false, false); + return createPersistenceFromAccessKey(accountName, containerName, key, null, rootPrefixNormalized, false, false); } catch (IllegalArgumentException | StringIndexOutOfBoundsException e) { log.error("Error occurred while connecting to Azure Storage using secret key: ", e); throw new IllegalArgumentException(
@@ -66,7 +67,8 @@ public static AzurePersistence createAzurePersistenceFrom(Configuration configur } private static AzurePersistence createPersistenceFromAccessKey(Configuration configuration) throws IOException { - return
createPersistenceFromAccessKey(configuration.accountName(), configuration.containerName(), configuration.accessKey(), configuration.blobEndpoint(), configuration.rootPath(), configuration.enableSecondaryLocation(), true); + final String rootPrefix = normalizePath(configuration.rootPath()); + return createPersistenceFromAccessKey(configuration.accountName(), configuration.containerName(), configuration.accessKey(), configuration.blobEndpoint(), rootPrefix, configuration.enableSecondaryLocation(), true); } private static AzurePersistence createPersistenceFromAccessKey(String accountName, String containerName, String accessKey, String blobEndpoint, String rootPrefix, boolean enableSecondaryLocation, boolean createContainer) throws IOException { @@ -99,8 +101,8 @@ private static AzurePersistence createPersistenceFromSasUri(Configuration config @NotNull private static AzurePersistence createPersistenceFromServicePrincipalCredentials(Configuration configuration) throws IOException { - String path = normalizePath(configuration.rootPath()); - return createPersistenceFromServicePrincipalCredentials(configuration.accountName(), configuration.containerName(), path, configuration.clientId(), configuration.clientSecret(), configuration.tenantId(), configuration.enableSecondaryLocation(), true); + String rootPrefix = normalizePath(configuration.rootPath()); + return createPersistenceFromServicePrincipalCredentials(configuration.accountName(), configuration.containerName(), rootPrefix, configuration.clientId(), configuration.clientSecret(), configuration.tenantId(), configuration.enableSecondaryLocation(), true); } private static AzurePersistence createPersistenceFromServicePrincipalCredentials(String accountName, String containerName, String rootPrefix, String clientId, String clientSecret, String tenantId, boolean enableSecondaryLocation, boolean createContainer) throws IOException { @@ -127,8 +129,8 @@ private static AzurePersistence createPersistenceFromServicePrincipalCredentials @NotNull private static AzurePersistence createAzurePersistence(String connectionString, Configuration configuration, boolean createContainer) throws IOException { - String path = normalizePath(configuration.rootPath()); - return createAzurePersistence(connectionString, configuration.accountName(), configuration.containerName(), path, configuration.enableSecondaryLocation(), createContainer); + String rootPrefix = normalizePath(configuration.rootPath()); + return createAzurePersistence(connectionString, configuration.accountName(), configuration.containerName(), rootPrefix, configuration.enableSecondaryLocation(), createContainer); } @NotNull diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java index 9c4525abb26..f6743edafc5 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureUtilities.java @@ -18,6 +18,7 @@ import com.azure.storage.blob.BlobContainerClient; import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobListDetails; import com.azure.storage.blob.models.BlobStorageException; import com.azure.storage.blob.models.ListBlobsOptions; import com.azure.storage.blob.specialized.AppendBlobClient; @@ -31,12 +32,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.OutputStream; 
-import java.net.URI; -import java.net.URISyntaxException; import java.nio.file.Paths; -import java.security.InvalidKeyException; -import java.util.ArrayList; -import java.util.EnumSet; import java.util.List; import java.util.stream.Collectors;
@@ -64,6 +60,7 @@ public static String getName(AppendBlobClient blob) { public static List<BlobItem> getBlobs(BlobContainerClient blobContainerClient, ListBlobsOptions listOptions) { + listOptions.setDetails(new BlobListDetails().setRetrieveMetadata(true)); return blobContainerClient.listBlobs(listOptions, null).stream().collect(Collectors.toList()); }
From 3ccdb109e0d6e728dd321ac93650debb426cc25b Mon Sep 17 00:00:00 2001 From: ierandra Date: Fri, 1 Nov 2024 16:25:15 +0200 Subject: [PATCH 09/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - fix rootPrefix issue
--- .../azure/AzurePersistenceManager.java | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java index 0e0d836ada5..3fcb8d51fe1 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java
@@ -29,11 +29,10 @@ public static AzurePersistence createAzurePersistenceFrom(@NotNull String accoun final String clientId = environment.getVariable(AZURE_CLIENT_ID); final String clientSecret = environment.getVariable(AZURE_CLIENT_SECRET); final String tenantId = environment.getVariable(AZURE_TENANT_ID); - final String rootPrefixNormalized = normalizePath(rootPrefix); if (StringUtils.isNoneBlank(clientId, clientSecret, tenantId)) { try { - return createPersistenceFromServicePrincipalCredentials(accountName, containerName, rootPrefixNormalized, clientId, clientSecret, tenantId, false, false); + return createPersistenceFromServicePrincipalCredentials(accountName, containerName, rootPrefix, clientId, clientSecret, tenantId, false, false); } catch (IllegalArgumentException | StringIndexOutOfBoundsException e) { log.error("Error occurred while connecting to Azure Storage using service principals: ", e); throw new IllegalArgumentException(
@@ -44,7 +44,7 @@ public static AzurePersistence createAzurePersistenceFrom(@NotNull String accoun String key = environment.getVariable(AZURE_SECRET_KEY); try { - return createPersistenceFromAccessKey(accountName, containerName, key, null, rootPrefixNormalized, false, false); + return createPersistenceFromAccessKey(accountName, containerName, key, null, rootPrefix, false, false); } catch (IllegalArgumentException | StringIndexOutOfBoundsException e) { log.error("Error occurred while connecting to Azure Storage using secret key: ", e); throw new IllegalArgumentException(
@@ -66,8 +66,7 @@ public static AzurePersistence createAzurePersistenceFrom(Configuration configur } private static AzurePersistence createPersistenceFromAccessKey(Configuration configuration) throws IOException { - final String rootPrefix = normalizePath(configuration.rootPath()); - return createPersistenceFromAccessKey(configuration.accountName(), configuration.containerName(),
configuration.accessKey(), configuration.blobEndpoint(), configuration.rootPath(), configuration.enableSecondaryLocation(), true); } private static AzurePersistence createPersistenceFromAccessKey(String accountName, String containerName, String accessKey, String blobEndpoint, String rootPrefix, boolean enableSecondaryLocation, boolean createContainer) throws IOException { @@ -101,8 +99,7 @@ private static AzurePersistence createPersistenceFromSasUri(Configuration config @NotNull private static AzurePersistence createPersistenceFromServicePrincipalCredentials(Configuration configuration) throws IOException { - String rootPrefix = normalizePath(configuration.rootPath()); - return createPersistenceFromServicePrincipalCredentials(configuration.accountName(), configuration.containerName(), rootPrefix, configuration.clientId(), configuration.clientSecret(), configuration.tenantId(), configuration.enableSecondaryLocation(), true); + return createPersistenceFromServicePrincipalCredentials(configuration.accountName(), configuration.containerName(), configuration.rootPath(), configuration.clientId(), configuration.clientSecret(), configuration.tenantId(), configuration.enableSecondaryLocation(), true); } private static AzurePersistence createPersistenceFromServicePrincipalCredentials(String accountName, String containerName, String rootPrefix, String clientId, String clientSecret, String tenantId, boolean enableSecondaryLocation, boolean createContainer) throws IOException { @@ -124,13 +121,14 @@ private static AzurePersistence createPersistenceFromServicePrincipalCredentials blobContainerClient.createIfNotExists(); } - return new AzurePersistence(blobContainerClient, writeContainerClient, rootPrefix, azureHttpRequestLoggingPolicy); + final String rootPrefixNormalized = normalizePath(rootPrefix); + + return new AzurePersistence(blobContainerClient, writeContainerClient, rootPrefixNormalized, azureHttpRequestLoggingPolicy); } @NotNull private static AzurePersistence createAzurePersistence(String connectionString, Configuration configuration, boolean createContainer) throws IOException { - String rootPrefix = normalizePath(configuration.rootPath()); - return createAzurePersistence(connectionString, configuration.accountName(), configuration.containerName(), rootPrefix, configuration.enableSecondaryLocation(), createContainer); + return createAzurePersistence(connectionString, configuration.accountName(), configuration.containerName(), configuration.rootPath(), configuration.enableSecondaryLocation(), createContainer); } @NotNull @@ -148,7 +146,9 @@ private static AzurePersistence createAzurePersistence(String connectionString, blobContainerClient.createIfNotExists(); } - return new AzurePersistence(blobContainerClient, writeBlobContainerClient, rootPrefix, azureHttpRequestLoggingPolicy); + final String rootPrefixNormalized = normalizePath(rootPrefix); + + return new AzurePersistence(blobContainerClient, writeBlobContainerClient, rootPrefixNormalized, azureHttpRequestLoggingPolicy); } catch (BlobStorageException e) { throw new IOException(e); } From 427ca790791c823d4c4735c6684b0ef0f97f60fb Mon Sep 17 00:00:00 2001 From: ierandra Date: Mon, 4 Nov 2024 15:15:06 +0200 Subject: [PATCH 10/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - PR review --- oak-parent/pom.xml | 2 +- .../apache/jackrabbit/oak/segment/azure/util/package-info.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/oak-parent/pom.xml b/oak-parent/pom.xml index 8694aecf53e..1b8d0308b1c 100644 --- 
a/oak-parent/pom.xml +++ b/oak-parent/pom.xml @@ -695,7 +695,7 @@ <groupId>com.fasterxml.jackson.dataformat</groupId> - <artifactId>jackson-dataformat-xml</artifactId> + <artifactId>jackson-dataformat-xml</artifactId> <version>${jackson.version}</version>
diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/package-info.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/package-info.java index 792686fe8b9..2cd4cf1b0a6 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/package-info.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/package-info.java
@@ -15,7 +15,7 @@ * limitations under the License. */ @Internal(since = "1.0.0") -@Version("2.0.0") +@Version("1.0.0") package org.apache.jackrabbit.oak.segment.azure.util; import org.apache.jackrabbit.oak.commons.annotations.Internal;
From a33dd37a926fcb38248505020cb31df935f7fd9b Mon Sep 17 00:00:00 2001 From: ierandra Date: Tue, 12 Nov 2024 12:17:11 +0200 Subject: [PATCH 11/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - fix rootPrefix issue
--- .../oak/segment/azure/AzureHttpRequestLoggingPolicy.java | 4 +--- .../jackrabbit/oak/segment/azure/util/package-info.java | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicy.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicy.java index cf029f64ba7..f07b5c5dd3b 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicy.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicy.java
@@ -40,9 +40,7 @@ public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineN } if (verboseEnabled) { - log.info("HTTP Request: {} {}", context.getHttpRequest().getHttpMethod(), context.getHttpRequest().getUrl()); - log.info("Status code is: {}", httpResponse.getStatusCode()); - log.info("Response time: {}ms", (stopwatch.elapsed(TimeUnit.NANOSECONDS))/1_000_000); + log.info("HTTP Request: {} {} {} {}ms", context.getHttpRequest().getHttpMethod(), context.getHttpRequest().getUrl(), httpResponse.getStatusCode(), (stopwatch.elapsed(TimeUnit.NANOSECONDS))/1_000_000); } return Mono.just(httpResponse);
diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/package-info.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/package-info.java index 2cd4cf1b0a6..792686fe8b9 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/package-info.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/package-info.java @@ -15,7 +15,7 @@ * limitations under the License.
*/ @Internal(since = "1.0.0") -@Version("1.0.0") +@Version("2.0.0") package org.apache.jackrabbit.oak.segment.azure.util; import org.apache.jackrabbit.oak.commons.annotations.Internal; From 931cc3bae6245df686d2206586dfda32f3194f78 Mon Sep 17 00:00:00 2001 From: ierandra Date: Tue, 12 Nov 2024 17:28:41 +0200 Subject: [PATCH 12/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - PR review --- .../azure/AzureHttpRequestLoggingPolicy.java | 18 ++++++++++++++++++ .../segment/azure/AzurePersistenceManager.java | 16 ++++++++++++++++ .../azure/AzureSegmentStoreService.java | 8 ++++---- ...ServiceV8.java => AzureSegmentStoreV8.java} | 16 ++-------------- .../AzureHttpRequestLoggingPolicyTest.java | 18 ++++++++++++++++++ .../AzureHttpRequestLoggingTestingPolicy.java | 18 ++++++++++++++++++ ...8Test.java => AzureSegmentStoreV8Test.java} | 2 +- 7 files changed, 77 insertions(+), 19 deletions(-) rename oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/{AzureSegmentStoreServiceV8.java => AzureSegmentStoreV8.java} (92%) rename oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/{AzureSegmentStoreServiceV8Test.java => AzureSegmentStoreV8Test.java} (99%) diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicy.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicy.java index f07b5c5dd3b..0c59c5d3439 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicy.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicy.java @@ -1,3 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ package org.apache.jackrabbit.oak.segment.azure; import com.azure.core.http.HttpPipelineCallContext; diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java index 3fcb8d51fe1..7fd64876db7 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java @@ -1,3 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.jackrabbit.oak.segment.azure; import com.azure.identity.ClientSecretCredential; diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreService.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreService.java index 1465574d83d..5d33fa25798 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreService.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentStoreService.java @@ -19,7 +19,7 @@ package org.apache.jackrabbit.oak.segment.azure; import org.apache.jackrabbit.oak.segment.azure.v8.AzurePersistenceV8; -import org.apache.jackrabbit.oak.segment.azure.v8.AzureSegmentStoreServiceV8; +import org.apache.jackrabbit.oak.segment.azure.v8.AzureSegmentStoreV8; import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence; import org.osgi.framework.ServiceRegistration; import org.osgi.service.component.ComponentContext; @@ -57,7 +57,7 @@ public class AzureSegmentStoreService { @Activate public void activate(ComponentContext context, Configuration config) throws IOException { if (useAzureSdkV12) { - log.info("Starting nodestore using Azure SDK 12"); + log.info("Starting node store using Azure SDK 12"); AzurePersistence persistence = AzurePersistenceManager.createAzurePersistenceFrom(config); registration = context.getBundleContext() .registerService(SegmentNodeStorePersistence.class, persistence, new Hashtable() {{ @@ -67,8 +67,8 @@ public void activate(ComponentContext context, Configuration config) throws IOEx } }}); } else { - log.info("Starting nodestore using Azure SDK 8"); - AzurePersistenceV8 persistence = AzureSegmentStoreServiceV8.createAzurePersistenceFrom(config); + log.info("Starting node store using Azure SDK 8"); + AzurePersistenceV8 persistence = AzureSegmentStoreV8.createAzurePersistenceFrom(config); registration = context.getBundleContext() .registerService(SegmentNodeStorePersistence.class, persistence, new Hashtable() {{ put(SERVICE_PID, String.format("%s(%s, %s)", AzurePersistenceV8.class.getName(), config.accountName(), config.rootPath())); diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreServiceV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreV8.java similarity index 92% rename from oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreServiceV8.java rename to oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreV8.java index 144e0e643fd..2e2bf2baf04 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreServiceV8.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreV8.java @@ -30,9 +30,6 @@ import org.apache.commons.lang3.StringUtils; import org.apache.jackrabbit.oak.segment.azure.Configuration; import org.jetbrains.annotations.NotNull; -import 
org.osgi.framework.ServiceRegistration; -import org.osgi.service.component.annotations.Component; -import org.osgi.service.component.annotations.ConfigurationPolicy; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,21 +37,12 @@ import java.net.URISyntaxException; import java.security.InvalidKeyException; -@Component( - configurationPolicy = ConfigurationPolicy.REQUIRE, - configurationPid = {Configuration.PID}) -public class AzureSegmentStoreServiceV8 { +public class AzureSegmentStoreV8 { - private static final Logger log = LoggerFactory.getLogger(AzureSegmentStoreServiceV8.class); + private static final Logger log = LoggerFactory.getLogger(AzureSegmentStoreV8.class); - public static final String DEFAULT_CONTAINER_NAME = "oak"; - - public static final String DEFAULT_ROOT_PATH = "/oak"; - - public static final boolean DEFAULT_ENABLE_SECONDARY_LOCATION = false; public static final String DEFAULT_ENDPOINT_SUFFIX = "core.windows.net"; - private ServiceRegistration registration; private static AzureStorageCredentialManagerV8 azureStorageCredentialManagerV8; public static AzurePersistenceV8 createAzurePersistenceFrom(Configuration configuration) throws IOException { diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicyTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicyTest.java index fb09ce2f81e..59edf558d5e 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicyTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingPolicyTest.java @@ -1,3 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ package org.apache.jackrabbit.oak.segment.azure; import com.azure.core.http.HttpPipelineCallContext; diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingTestingPolicy.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingTestingPolicy.java index 25dba3f3bb1..d76554e3caa 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingTestingPolicy.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureHttpRequestLoggingTestingPolicy.java @@ -1,3 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ package org.apache.jackrabbit.oak.segment.azure; import com.azure.core.http.HttpPipelineCallContext; diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreServiceV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreV8Test.java similarity index 99% rename from oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreServiceV8Test.java rename to oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreV8Test.java index de8e5c13bb2..0b8b84415a0 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreServiceV8Test.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentStoreV8Test.java @@ -59,7 +59,7 @@ import static org.junit.Assert.fail; import static org.junit.Assume.assumeNotNull; -public class AzureSegmentStoreServiceV8Test { +public class AzureSegmentStoreV8Test { private static final Environment ENVIRONMENT = new Environment(); @ClassRule From 63748600f4c7a58355071484d72d6826de4e3657 Mon Sep 17 00:00:00 2001 From: ierandra Date: Thu, 14 Nov 2024 16:48:24 +0200 Subject: [PATCH 13/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - PR review --- .../oak/segment/azure/AzureRepositoryLock.java | 3 --- .../segment/azure/util/AzureRequestOptions.java | 8 ++++---- .../azure/AzureSegmentArchiveWriterTest.java | 4 ++-- .../oak/segment/azure/MockAzureHttpResponse.java | 16 ++++++++++++++++ 4 files changed, 22 insertions(+), 9 deletions(-) diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLock.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLock.java index e60f94d3802..9fa545e3399 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLock.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLock.java @@ -148,9 +148,6 @@ private void refreshLease() { BlobErrorCode.SERVER_BUSY, BlobErrorCode.INTERNAL_ERROR).contains(storageException.getErrorCode())) { log.warn("Could not renew the lease due to the operation timeout or service unavailability. Retry in progress ...", e); - //TODO: ierandra - } else if (storageException.getStatusCode() == 306) { - log.warn("Client side error. Retry in progress ...", e); } else { log.warn("Could not renew lease due to storage exception. Retry in progress ... 
", e); } diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java index 7ddb8067c9c..8aec048eddd 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java @@ -49,13 +49,13 @@ public static RequestRetryOptions getRetryOptionsDefault() { } public static RequestRetryOptions getRetryOptionsDefault(String secondaryHost) { - int retryAttempts = Integer.getInteger(RETRY_BACKOFF_PROP, DEFAULT_RETRY_BACKOFF_SECONDS); + int retryAttempts = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS); int timeoutExecution = Integer.getInteger(TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION); int timeoutInterval = Integer.getInteger(TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL); long timeoutIntervalToMs = timeoutInterval * 1_000L; long timeoutIntervalMax = timeoutIntervalToMs * 5; - return new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, + return new RequestRetryOptions(RetryPolicyType.FIXED, retryAttempts, timeoutExecution, timeoutIntervalToMs, @@ -68,13 +68,13 @@ public static RequestRetryOptions getRetryOptionsDefault(String secondaryHost) { * @return */ public static RequestRetryOptions getRetryOperationsOptimiseForWriteOperations() { - int retryAttempts = Integer.getInteger(RETRY_BACKOFF_PROP, DEFAULT_RETRY_BACKOFF_SECONDS); + int retryAttempts = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS); Integer writeTimeoutExecution = Integer.getInteger(WRITE_TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION); Integer writeTimeoutInterval = Integer.getInteger(WRITE_TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL); long writeTimeoutIntervalToMs = writeTimeoutInterval * 1_000L; long writeTimeoutIntervalMax = writeTimeoutIntervalToMs * 5; - return new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, + return new RequestRetryOptions(RetryPolicyType.FIXED, retryAttempts, writeTimeoutExecution, writeTimeoutIntervalToMs, diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java index 015b60bdf93..82e923cc40b 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java @@ -55,7 +55,7 @@ public class AzureSegmentArchiveWriterTest { public static final int MAX_ATTEMPTS = 3; @Rule - public MockServerRule mockServerRule = new MockServerRule(this, 1090); + public MockServerRule mockServerRule = new MockServerRule(this); @SuppressWarnings("unused") private MockServerClient mockServerClient; @@ -70,7 +70,7 @@ public void setUp() throws Exception { System.setProperty("azure.segment.archive.writer.retries.max", Integer.toString(MAX_ATTEMPTS)); // Disable Azure SDK own retry mechanism used by AzureSegmentArchiveWriter - System.setProperty("segment.azure.retry.backoff", "1"); + System.setProperty("segment.azure.retry.attempts", "1"); System.setProperty("segment.timeout.execution", "1"); } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/MockAzureHttpResponse.java 
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/MockAzureHttpResponse.java index 35e82ddb6da..f1b1ba563b2 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/MockAzureHttpResponse.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/MockAzureHttpResponse.java @@ -1,3 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.jackrabbit.oak.segment.azure; import com.azure.core.http.HttpHeaders; From 12c5eaea6b2a4de348b110cf2adcf295d74bf106 Mon Sep 17 00:00:00 2001 From: ierandra Date: Wed, 20 Nov 2024 12:19:42 +0200 Subject: [PATCH 14/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - PR review --- .../oak/segment/azure/AzurePersistence.java | 22 +++++++--- .../azure/AzurePersistenceManager.java | 19 ++++++--- .../azure/AzureArchiveManagerTest.java | 42 ++++++++++--------- .../oak/segment/azure/AzureGCJournalTest.java | 4 +- .../segment/azure/AzureManifestFileTest.java | 4 +- .../segment/azure/AzureReadSegmentTest.java | 15 ++++--- .../azure/AzureRepositoryLockTest.java | 27 +++++++----- .../azure/AzureSegmentArchiveWriterTest.java | 15 ++++--- .../oak/segment/azure/AzureTarFileTest.java | 6 ++- .../oak/segment/azure/AzureTarFilesTest.java | 9 ++-- .../oak/segment/azure/AzuriteDockerRule.java | 16 +++++-- .../azure/fixture/SegmentAzureFixture.java | 33 ++++++++------- .../azure/journal/AzureJournalReaderTest.java | 1 - .../azure/journal/AzureTarRevisionsTest.java | 4 +- .../split/SplitPersistenceBlobTest.java | 3 +- .../split/SplitPersistenceTest.java | 3 +- 16 files changed, 143 insertions(+), 80 deletions(-) diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistence.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistence.java index 0768875fdc6..a0f0ddded5a 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistence.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistence.java @@ -43,7 +43,9 @@ public class AzurePersistence implements SegmentNodeStorePersistence { protected final BlobContainerClient readBlobContainerClient; - protected BlobContainerClient writeBlobContainerClient; + protected final BlobContainerClient writeBlobContainerClient; + + protected final BlobContainerClient noRetryBlobContainerClient; protected final String rootPrefix; @@ -51,13 +53,14 @@ public class AzurePersistence implements SegmentNodeStorePersistence { protected WriteAccessController writeAccessController = new WriteAccessController(); - public AzurePersistence(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, String rootPrefix) { - 
this(readBlobContainerClient, writeBlobContainerClient, rootPrefix, null); + public AzurePersistence(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, BlobContainerClient noRetryBlobContainerClient, String rootPrefix) { + this(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, rootPrefix, null); } - public AzurePersistence(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, String rootPrefix, AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy) { + public AzurePersistence(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, BlobContainerClient noRetryBlobContainerClient, String rootPrefix, AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy) { this.readBlobContainerClient = readBlobContainerClient; this.writeBlobContainerClient = writeBlobContainerClient; + this.noRetryBlobContainerClient = noRetryBlobContainerClient; this.azureHttpRequestLoggingPolicy = azureHttpRequestLoggingPolicy; this.rootPrefix = rootPrefix; } @@ -98,7 +101,8 @@ public ManifestFile getManifestFile() throws IOException { @Override public RepositoryLock lockRepository() throws IOException { BlockBlobClient blockBlobClient = getBlockBlob("repo.lock"); - BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(blockBlobClient).buildClient(); + BlockBlobClient noRetryBlockBlobClient = getNoRetryBlockBlob("repo.lock"); + BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(noRetryBlockBlobClient).buildClient(); return new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> { log.warn("Lost connection to the Azure. The client will be closed."); // TODO close the connection @@ -113,6 +117,14 @@ private BlockBlobClient getBlockBlob(String path) throws IOException { } } + private BlockBlobClient getNoRetryBlockBlob(String path) throws IOException { + try { + return noRetryBlobContainerClient.getBlobClient(rootPrefix + "/" + path).getBlockBlobClient(); + } catch (BlobStorageException e) { + throw new IOException(e); + } + } + private AppendBlobClient getAppendBlob(String path) throws IOException { try { return readBlobContainerClient.getBlobClient(rootPrefix + "/" + path).getAppendBlobClient(); diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java index 7fd64876db7..12fee4c31c3 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzurePersistenceManager.java @@ -133,13 +133,15 @@ private static AzurePersistence createPersistenceFromServicePrincipalCredentials RequestRetryOptions writeRetryOptions = AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations(); BlobContainerClient writeContainerClient = getBlobContainerClient(accountName, containerName, writeRetryOptions, azureHttpRequestLoggingPolicy, clientSecretCredential); + BlobContainerClient noRetryBlobContainerClient = getBlobContainerClient(accountName, containerName, null, azureHttpRequestLoggingPolicy, clientSecretCredential); + if (createContainer) { blobContainerClient.createIfNotExists(); } final String rootPrefixNormalized = normalizePath(rootPrefix); - return new AzurePersistence(blobContainerClient, writeContainerClient, rootPrefixNormalized, 
azureHttpRequestLoggingPolicy); + return new AzurePersistence(blobContainerClient, writeContainerClient, noRetryBlobContainerClient, rootPrefixNormalized, azureHttpRequestLoggingPolicy); } @NotNull @@ -158,13 +160,15 @@ private static AzurePersistence createAzurePersistence(String connectionString, RequestRetryOptions writeRetryOptions = AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations(); BlobContainerClient writeBlobContainerClient = getBlobContainerClient(accountName, containerName, writeRetryOptions, azureHttpRequestLoggingPolicy, connectionString); + BlobContainerClient noRetryBlobContainerClient = getBlobContainerClient(accountName, containerName, null, azureHttpRequestLoggingPolicy, connectionString); + if (createContainer) { blobContainerClient.createIfNotExists(); } final String rootPrefixNormalized = normalizePath(rootPrefix); - return new AzurePersistence(blobContainerClient, writeBlobContainerClient, rootPrefixNormalized, azureHttpRequestLoggingPolicy); + return new AzurePersistence(blobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, rootPrefixNormalized, azureHttpRequestLoggingPolicy); } catch (BlobStorageException e) { throw new IOException(e); } @@ -189,10 +193,15 @@ private static BlobContainerClient getBlobContainerClient(String accountName, St private static BlobServiceClientBuilder blobServiceClientBuilder(String accountName, RequestRetryOptions requestRetryOptions, AzureHttpRequestLoggingPolicy azureHttpRequestLoggingPolicy) { String endpoint = String.format("https://%s.blob.core.windows.net", accountName); - return new BlobServiceClientBuilder() + BlobServiceClientBuilder builder = new BlobServiceClientBuilder() .endpoint(endpoint) - .addPolicy(azureHttpRequestLoggingPolicy) - .retryOptions(requestRetryOptions); + .addPolicy(azureHttpRequestLoggingPolicy); + + if (requestRetryOptions != null) { + builder.retryOptions(requestRetryOptions); + } + + return builder; } private static RequestRetryOptions readRequestRetryOptions(boolean enableSecondaryLocation, String accountName) { diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java index 62fe73d4cb0..e52f216bc89 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureArchiveManagerTest.java @@ -90,6 +90,7 @@ public class AzureArchiveManagerTest { private BlobContainerClient readBlobContainerClient; private BlobContainerClient writeBlobContainerClient; + private BlobContainerClient noRetryBlobContainerClient; private AzurePersistence azurePersistence; @@ -97,10 +98,11 @@ public class AzureArchiveManagerTest { public void setup() throws BlobStorageException, InvalidKeyException, URISyntaxException { readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); - azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); + azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); 
azurePersistence.setWriteAccessController(writeAccessController); } @@ -154,17 +156,17 @@ public void testBackupWithRecoveredEntries() throws BlobStorageException, IOExce manager.backup("data00000a.tar", "data00000a.tar.bak", recovered.keySet()); for (int i = 0; i <= 4; i++) { - assertTrue(readBlobContainerClient.getBlobClient("oak/data00000a.tar/000"+ i + "." + uuids.get(i)).exists()); + assertTrue(readBlobContainerClient.getBlobClient("oak/data00000a.tar/000" + i + "." + uuids.get(i)).exists()); } for (int i = 5; i <= 9; i++) { - assertFalse(String.format("Segment %s.??? should have been deleted.", "oak/data00000a.tar/000"+ i), readBlobContainerClient.getBlobClient("oak/data00000a.tar/000"+ i + "." + uuids.get(i)).exists()); + assertFalse(String.format("Segment %s.??? should have been deleted.", "oak/data00000a.tar/000" + i), readBlobContainerClient.getBlobClient("oak/data00000a.tar/000" + i + "." + uuids.get(i)).exists()); } } @Test public void testUncleanStop() throws IOException, InvalidFileStoreVersionException, CommitFailedException, BlobStorageException { - AzurePersistence p = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); + AzurePersistence p = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -186,7 +188,7 @@ public void testUncleanStop() throws IOException, InvalidFileStoreVersionExcepti @Test // see OAK-8566 public void testUncleanStopWithEmptyArchive() throws IOException, InvalidFileStoreVersionException, CommitFailedException, BlobStorageException { - AzurePersistence p = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); + AzurePersistence p = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -218,7 +220,7 @@ public void testUncleanStopWithEmptyArchive() throws IOException, InvalidFileSto @Test public void testUncleanStopSegmentMissing() throws IOException, InvalidFileStoreVersionException, CommitFailedException, BlobStorageException { - AzurePersistence p = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); + AzurePersistence p = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fs).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -345,7 +347,7 @@ public void testSegmentDeletedAfterCreatingReader() throws IOException, BlobStor @Test(expected = SegmentNotFoundException.class) public void testMissingSegmentDetectedInFileStore() throws IOException, BlobStorageException, InvalidFileStoreVersionException { - AzurePersistence azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); + AzurePersistence azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, 
noRetryBlobContainerClient, "oak"); FileStore fileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(azurePersistence).build(); SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); @@ -372,7 +374,7 @@ public void testMissingSegmentDetectedInFileStore() throws IOException, BlobStor @Test public void testReadOnlyRecovery() throws InvalidFileStoreVersionException, IOException, CommitFailedException, BlobStorageException { - AzurePersistence rwPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); + AzurePersistence rwPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build(); SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -382,12 +384,12 @@ public void testReadOnlyRecovery() throws InvalidFileStoreVersionException, IOEx ListBlobsOptions listOptions = new ListBlobsOptions(); listOptions.setPrefix("oak/data00000a.tar"); - assertTrue(readBlobContainerClient.listBlobs(listOptions,null).iterator().hasNext()); + assertTrue(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); listOptions.setPrefix("oak/data00000a.tar.ro.bak"); assertFalse(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); // create read-only FS - AzurePersistence roPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); + AzurePersistence roPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly(); PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() @@ -408,7 +410,7 @@ public void testReadOnlyRecovery() throws InvalidFileStoreVersionException, IOEx @Test public void testCachingPersistenceTarRecovery() throws InvalidFileStoreVersionException, IOException, CommitFailedException, BlobStorageException { - AzurePersistence rwPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); + AzurePersistence rwPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(rwPersistence).build(); SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -423,7 +425,7 @@ public void testCachingPersistenceTarRecovery() throws InvalidFileStoreVersionEx assertFalse(readBlobContainerClient.listBlobs(listOptions, null).iterator().hasNext()); // create files store with split persistence - AzurePersistence azureSharedPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); + AzurePersistence azureSharedPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); CachingPersistence cachingPersistence = new CachingPersistence(createPersistenceCache(), azureSharedPersistence); File 
localFolder = folder.newFolder(); @@ -442,7 +444,7 @@ public void testCachingPersistenceTarRecovery() throws InvalidFileStoreVersionEx @Test public void testCollectBlobReferencesForReadOnlyFileStore() throws InvalidFileStoreVersionException, IOException, CommitFailedException, BlobStorageException { - AzurePersistence rwPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); + AzurePersistence rwPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); try (FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build()) { SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -454,7 +456,7 @@ public void testCollectBlobReferencesForReadOnlyFileStore() throws InvalidFileSt assertFalse("brf file should not be present", readBlobContainerClient.getBlobClient("oak/data00000a.tar/data00000a.tar.brf").exists()); // create read-only FS, while the rw FS is still open - AzurePersistence roPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); + AzurePersistence roPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); try (ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly()) { PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() @@ -472,7 +474,7 @@ public void testCollectBlobReferencesForReadOnlyFileStore() throws InvalidFileSt @Test public void testCollectBlobReferencesDoesNotFailWhenFileIsMissing() throws InvalidFileStoreVersionException, IOException, CommitFailedException, BlobStorageException { - AzurePersistence rwPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); + AzurePersistence rwPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); try (FileStore rwFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(rwPersistence).build()) { SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(rwFileStore).build(); NodeBuilder builder = segmentNodeStore.getRoot().builder(); @@ -484,7 +486,7 @@ public void testCollectBlobReferencesDoesNotFailWhenFileIsMissing() throws Inval assertFalse("brf file should not be present", readBlobContainerClient.getBlobClient("oak/data00000a.tar/data00000a.tar.brf").exists()); // create read-only FS, while the rw FS is still open - AzurePersistence roPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); + AzurePersistence roPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); try (ReadOnlyFileStore roFileStore = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(roPersistence).buildReadOnly()) { PropertyState fooProperty = SegmentNodeStoreBuilders.builder(roFileStore).build() @@ -507,7 +509,8 @@ public void testCollectBlobReferencesDoesNotFailWhenFileIsMissing() throws Inval public void testWriteAfterLosingRepoLock() throws Exception { BlobContainerClient oakDirectory = readBlobContainerClient.getBlobClient("oak").getContainerClient(); BlobContainerClient writeOakDirectory = 
writeBlobContainerClient.getBlobClient("oak").getContainerClient(); - AzurePersistence rwPersistence = new AzurePersistence(oakDirectory, writeOakDirectory, ""); + BlobContainerClient noRetryOakDirectory = noRetryBlobContainerClient.getBlobClient("oak").getContainerClient(); + AzurePersistence rwPersistence = new AzurePersistence(oakDirectory, writeOakDirectory, noRetryOakDirectory, ""); BlockBlobClient blob = readBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); BlobLeaseClient leaseClient = new BlobLeaseClientBuilder().blobClient(blob).buildClient(); @@ -522,7 +525,8 @@ public void testWriteAfterLosingRepoLock() throws Exception { AzurePersistence mockedRwPersistence = Mockito.spy(rwPersistence); WriteAccessController writeAccessController = new WriteAccessController(); - AzureRepositoryLock azureRepositoryLock = new AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> {}, writeAccessController); + AzureRepositoryLock azureRepositoryLock = new AzureRepositoryLock(blobMocked, blobLeaseMocked, () -> { + }, writeAccessController); AzureArchiveManager azureArchiveManager = new AzureArchiveManager(oakDirectory, writeOakDirectory, "", new IOMonitorAdapter(), new FileStoreMonitorAdapter(), writeAccessController); @@ -570,7 +574,7 @@ public void testWriteAfterLosingRepoLock() throws Exception { Thread.sleep(2000); // It should be possible to start another RW file store. - FileStore rwFileStore2 = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(new AzurePersistence(oakDirectory, writeOakDirectory, "")).build(); + FileStore rwFileStore2 = FileStoreBuilder.fileStoreBuilder(folder.newFolder()).withCustomPersistence(new AzurePersistence(oakDirectory, writeOakDirectory, noRetryOakDirectory, "")).build(); SegmentNodeStore segmentNodeStore2 = SegmentNodeStoreBuilders.builder(rwFileStore2).build(); NodeBuilder builder2 = segmentNodeStore2.getRoot().builder(); diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalTest.java index d6cc40a0221..dcd08ca3d2d 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureGCJournalTest.java @@ -35,16 +35,18 @@ public class AzureGCJournalTest extends GcJournalTest { private BlobContainerClient readBlobContainerClient; private BlobContainerClient writeBlobContainerClient; + private BlobContainerClient noRetryBlobContainerClient; @Before public void setup() throws BlobStorageException { readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); } @Override protected SegmentNodeStorePersistence getPersistence() throws Exception { - return new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); + return new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); } @Test diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFileTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFileTest.java index 24bd3f1dd99..241f42f2dd7 100644 --- 
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFileTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureManifestFileTest.java @@ -40,16 +40,18 @@ public class AzureManifestFileTest { private BlobContainerClient readBlobContainerClient; private BlobContainerClient writeBlobContainerClient; + private BlobContainerClient noRetryBlobContainerClient; @Before public void setup() throws BlobStorageException, InvalidKeyException, URISyntaxException { readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); } @Test public void testManifest() throws IOException { - ManifestFile manifestFile = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak").getManifestFile(); + ManifestFile manifestFile = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak").getManifestFile(); assertFalse(manifestFile.exists()); Properties props = new Properties(); diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureReadSegmentTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureReadSegmentTest.java index ba91e852556..cbc3d2cf7c1 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureReadSegmentTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureReadSegmentTest.java @@ -49,16 +49,18 @@ public class AzureReadSegmentTest { private BlobContainerClient readBlobContainerClient; private BlobContainerClient writeBlobContainerClient; + private BlobContainerClient noRetryBlobContainerClient; @Before public void setup() throws BlobStorageException, InvalidKeyException, URISyntaxException { readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); } @Test(expected = SegmentNotFoundException.class) public void testReadNonExistentSegmentRepositoryReachable() throws IOException, InvalidFileStoreVersionException, BlobStorageException { - AzurePersistence p = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); + AzurePersistence p = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); SegmentId id = new SegmentId(fs, 0, 0); @@ -71,7 +73,7 @@ public void testReadNonExistentSegmentRepositoryReachable() throws IOException, @Test(expected = RepositoryNotReachableException.class) public void testReadExistentSegmentRepositoryNotReachable() throws IOException, InvalidFileStoreVersionException, BlobStorageException { - AzurePersistence p = new ReadFailingAzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); + AzurePersistence p = new ReadFailingAzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); FileStore fs = FileStoreBuilder.fileStoreBuilder(new File("target")).withCustomPersistence(p).build(); SegmentId id = new SegmentId(fs, 0, 0); @@ -86,14 +88,14 @@ public void testReadExistentSegmentRepositoryNotReachable() throws IOException, }
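The two tests above fix the failure-mode contract that the migrated code must keep: a missing segment on a reachable backend surfaces as SegmentNotFoundException, while an unreachable backend surfaces as RepositoryNotReachableException. A hedged sketch of that distinction from the caller's side (fs and id as in the tests; illustration only, not test code):

    // Sketch only: mirrors the expectations encoded by the two tests above.
    try {
        fs.readSegment(id);
    } catch (SegmentNotFoundException e) {
        // The backend answered; the segment is genuinely absent.
    } catch (RepositoryNotReachableException e) {
        // The backend itself could not be reached; not a data problem.
    }

The ReadFailingAzurePersistence helper below injects the second case by overriding readSegment to throw.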
static class ReadFailingAzurePersistence extends AzurePersistence { - public ReadFailingAzurePersistence(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, String rootPrefix) { - super(readBlobContainerClient, writeBlobContainerClient, rootPrefix); + public ReadFailingAzurePersistence(BlobContainerClient readBlobContainerClient, BlobContainerClient writeBlobContainerClient, BlobContainerClient noRetryBlobContainerClient, String rootPrefix) { + super(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, rootPrefix); } @Override public SegmentArchiveManager createArchiveManager(boolean mmap, boolean offHeapAccess, IOMonitor ioMonitor, FileStoreMonitor fileStoreMonitor, RemoteStoreMonitor remoteStoreMonitor) { - return new AzureArchiveManager(readBlobContainerClient, writeBlobContainerClient, rootPrefix,ioMonitor, fileStoreMonitor, writeAccessController) { + return new AzureArchiveManager(readBlobContainerClient, writeBlobContainerClient, rootPrefix, ioMonitor, fileStoreMonitor, writeAccessController) { @Override public SegmentArchiveReader open(String archiveName) throws IOException { return new AzureSegmentArchiveReader(readBlobContainerClient, rootPrefix, archiveName, ioMonitor) { @@ -111,7 +113,8 @@ public SegmentArchiveWriter create(String archiveName) throws IOException { @Override public Buffer readSegment(long msb, long lsb) throws IOException { throw new RepositoryNotReachableException( - new RuntimeException("Cannot access Azure storage")); } + new RuntimeException("Cannot access Azure storage")); + } }; } }; diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLockTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLockTest.java index 619621af54d..077f344fad0 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLockTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureRepositoryLockTest.java @@ -57,11 +57,13 @@ public class AzureRepositoryLockTest { @ClassRule public static AzuriteDockerRule azurite = new AzuriteDockerRule(); - private BlobContainerClient container; + private BlobContainerClient noRetryBlobContainerClient; + private BlobContainerClient readBlobContainerClient; @Before public void setup() throws BlobStorageException, InvalidKeyException, URISyntaxException { - container = azurite.getReadBlobContainerClient("oak-test"); + noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); + readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); } @Rule @@ -71,8 +73,9 @@ public void setup() throws BlobStorageException, InvalidKeyException, URISyntaxE @Test public void testFailingLock() throws IOException, BlobStorageException { - BlockBlobClient blockBlobClient = container.getBlobClient("oak/repo.lock").getBlockBlobClient(); - BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(blockBlobClient).buildClient(); + BlockBlobClient blockBlobClient = readBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlockBlobClient noRetryBlockBlobClient = noRetryBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(noRetryBlockBlobClient).buildClient(); new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> {}, new WriteAccessController()).lock(); try { new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> {}, new WriteAccessController()).lock();
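A note on the pattern these lock tests exercise: the lock blob itself is accessed through the normally-retrying read client, but the lease is taken and renewed through the client built without the custom retry options, so a failed renewal reaches AzureRepositoryLock within one renewal interval instead of being absorbed by pipeline retries. A minimal wiring sketch, assuming container clients configured as in AzuriteDockerRule (AzureRepositoryLock and WriteAccessController are the classes from this patch):

    import com.azure.storage.blob.specialized.BlobLeaseClient;
    import com.azure.storage.blob.specialized.BlobLeaseClientBuilder;
    import com.azure.storage.blob.specialized.BlockBlobClient;

    // Retrying client for ordinary I/O on the lock blob:
    BlockBlobClient blockBlobClient = readContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient();
    // Client without the custom retry profile for the lease, so renewal failures surface at once:
    BlockBlobClient noRetryBlockBlobClient = noRetryContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient();
    BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(noRetryBlockBlobClient).buildClient();
    new AzureRepositoryLock(blockBlobClient, blobLeaseClient, () -> {}, new WriteAccessController()).lock();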
@@ -84,8 +87,9 @@ public void testFailingLock() throws IOException, BlobStorageException { @Test public void testWaitingLock() throws BlobStorageException, InterruptedException, IOException { - BlockBlobClient blockBlobClient = container.getBlobClient("oak/repo.lock").getBlockBlobClient(); - BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(blockBlobClient).buildClient(); + BlockBlobClient blockBlobClient = readBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlockBlobClient noRetryBlockBlobClient = noRetryBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(noRetryBlockBlobClient).buildClient(); Semaphore s = new Semaphore(0); new Thread(() -> { try { @@ -104,8 +108,9 @@ public void testWaitingLock() throws BlobStorageException, InterruptedException, @Test public void testLeaseRefreshUnsuccessful() throws BlobStorageException, IOException, InterruptedException { - BlockBlobClient blockBlobClient = container.getBlobClient("oak/repo.lock").getBlockBlobClient(); - BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(blockBlobClient).buildClient(); + BlockBlobClient blockBlobClient = readBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlockBlobClient noRetryBlockBlobClient = noRetryBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(noRetryBlockBlobClient).buildClient(); BlockBlobClient blobMocked = Mockito.spy(blockBlobClient); BlobLeaseClient blobLeaseMocked = Mockito.spy(blobLeaseClient); @@ -136,9 +141,9 @@ public void testLeaseRefreshUnsuccessful() throws BlobStorageException, IOExcept @Test public void testWritesBlockedOnlyAfterFewUnsuccessfulAttempts() throws Exception { - - BlockBlobClient blockBlobClient = container.getBlobClient("oak/repo.lock").getBlockBlobClient(); - BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(blockBlobClient).buildClient(); + BlockBlobClient blockBlobClient = readBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlockBlobClient noRetryBlockBlobClient = noRetryBlobContainerClient.getBlobClient("oak/repo.lock").getBlockBlobClient(); + BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder().blobClient(noRetryBlockBlobClient).buildClient(); BlockBlobClient blobMocked = Mockito.spy(blockBlobClient); BlobLeaseClient blobLeaseMocked = Mockito.spy(blobLeaseClient);
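These tests rely on one failure-injection technique throughout: spying on the real clients with Mockito and stubbing renewLease to throw, which drives the renewal loop through its error branches without a real outage. A hedged sketch of the technique (the stubbed exception and the failure count are illustrative):

    import com.azure.storage.blob.specialized.BlobLeaseClient;
    import org.mockito.Mockito;

    // Sketch only: fail the first two renewals, then let the real call succeed again.
    BlobLeaseClient blobLeaseMocked = Mockito.spy(blobLeaseClient);
    Mockito.doThrow(new RuntimeException("simulated renewal failure"))
           .doThrow(new RuntimeException("simulated renewal failure"))
           .doCallRealMethod()
           .when(blobLeaseMocked).renewLease();

With that in place, testWritesBlockedOnlyAfterFewUnsuccessfulAttempts can assert that the WriteAccessController blocks writers only after the grace period of failed renewals, not on the first failure.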
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java index 82e923cc40b..14c69d893ca 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java @@ -180,12 +180,13 @@ private SegmentArchiveWriter createSegmentArchiveWriter() throws IOException { createContainerMock(); BlobContainerClient readBlobContainerClient = getCloudStorageAccount("oak-test", AzureRequestOptions.getRetryOptionsDefault()); BlobContainerClient writeBlobContainerClient = getCloudStorageAccount("oak-test", AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations()); + BlobContainerClient noRetryBlobContainerClient = getCloudStorageAccount("oak-test", null); writeBlobContainerClient.deleteIfExists(); writeBlobContainerClient.createIfNotExists(); WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); - AzurePersistence azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak");/**/ + AzurePersistence azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); azurePersistence.setWriteAccessController(writeAccessController); SegmentArchiveManager manager = azurePersistence.createArchiveManager(false, false, new IOMonitorAdapter(), new FileStoreMonitorAdapter(), new RemoteStoreMonitorAdapter()); SegmentArchiveWriter writer = manager.create("data00000a.tar"); @@ -239,12 +240,16 @@ public BlobContainerClient getCloudStorageAccount(String containerName, RequestR AzureHttpRequestLoggingTestingPolicy azureHttpRequestLoggingTestingPolicy = new AzureHttpRequestLoggingTestingPolicy(); - BlobServiceClient blobServiceClient = new BlobServiceClientBuilder() + BlobServiceClientBuilder builder = new BlobServiceClientBuilder() .endpoint(azurite.getBlobEndpoint()) .addPolicy(azureHttpRequestLoggingTestingPolicy) - .connectionString(("DefaultEndpointsProtocol=http;" + accountName + ";" + accountKey + ";" + blobEndpoint)) - .retryOptions(retryOptions) - .buildClient(); + .connectionString(("DefaultEndpointsProtocol=http;" + accountName + ";" + accountKey + ";" + blobEndpoint)); + + if (retryOptions != null) { + builder.retryOptions(retryOptions); + } + + BlobServiceClient blobServiceClient = builder.buildClient(); return blobServiceClient.getBlobContainerClient(containerName); } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFileTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFileTest.java index 31690e03e05..9c4aad9aebb 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFileTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFileTest.java @@ -39,13 +39,17 @@ public class AzureTarFileTest extends TarFileTest { private BlobContainerClient writeBlobContainerClient; + private BlobContainerClient noRetryBlobContainerClient; + @Before @Override public void setUp() throws IOException { try { readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); - AzurePersistence azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); + noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); + + AzurePersistence azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); azurePersistence.setWriteAccessController(writeAccessController);
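Two conventions from this patch meet in the helper above: each client gets its retry behaviour from AzureRequestOptions, and a null argument simply skips builder.retryOptions(...) altogether, which is how the lease client avoids the custom retry profiles. For reference, a hedged sketch of how such options are assembled from system properties, mirroring the call shape used by the AzureRequestOptions changes earlier in this patch (the property names appear elsewhere in the patch; the fallback values here are illustrative, not the real defaults):

    import com.azure.storage.common.policy.RequestRetryOptions;
    import com.azure.storage.common.policy.RetryPolicyType;

    // Sketch only: FIXED back-off, as this patch now configures.
    int maxTries = Integer.getInteger("segment.azure.retry.attempts", 5);          // illustrative default
    int tryTimeoutInSeconds = Integer.getInteger("segment.timeout.execution", 30); // illustrative default
    long retryDelayInMs = 5_000L;                                                  // illustrative interval
    RequestRetryOptions retryOptions = new RequestRetryOptions(RetryPolicyType.FIXED,
            maxTries, tryTimeoutInSeconds, retryDelayInMs, retryDelayInMs * 5, null);

diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFilesTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFilesTest.java index fbd872c359d..afe76eeeee9 100644 ---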
a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFilesTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureTarFilesTest.java @@ -33,16 +33,19 @@ public class AzureTarFilesTest extends TarFilesTest { private BlobContainerClient readBlobContainerClient; private BlobContainerClient writeBlobContainerClient; + private BlobContainerClient noRetryBlobContainerClient; @Before @Override public void setUp() throws Exception { readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); - AzurePersistence azurePersistenceV8 = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); + noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); + + AzurePersistence azurePersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); WriteAccessController writeAccessController = new WriteAccessController(); writeAccessController.enableWriting(); - azurePersistenceV8.setWriteAccessController(writeAccessController); + azurePersistence.setWriteAccessController(writeAccessController); tarFiles = TarFiles.builder() .withDirectory(folder.newFolder()) .withTarRecovery((id, data, recovery) -> { @@ -52,7 +55,7 @@ public void setUp() throws Exception { .withFileStoreMonitor(new FileStoreMonitorAdapter()) .withRemoteStoreMonitor(new RemoteStoreMonitorAdapter()) .withMaxFileSize(MAX_FILE_SIZE) - .withPersistence(azurePersistenceV8) + .withPersistence(azurePersistence) .build(); } } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java index d0651e0fa7a..187e85afb9a 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzuriteDockerRule.java @@ -107,6 +107,11 @@ public BlobContainerClient getReadBlobContainerClient(String name) throws BlobSt return cloud; } + public BlobContainerClient getNoRetryBlobContainerClient(String name) throws BlobStorageException { + BlobContainerClient cloud = getCloudStorageAccount(name, null); + return cloud; + } + public BlobContainerClient getWriteBlobContainerClient(String name) throws BlobStorageException { BlobContainerClient cloud = getCloudStorageAccount(name, AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations()); return cloud; @@ -119,12 +124,15 @@ public BlobContainerClient getCloudStorageAccount(String containerName, RequestR AzureHttpRequestLoggingTestingPolicy azureHttpRequestLoggingTestingPolicy = new AzureHttpRequestLoggingTestingPolicy(); - BlobServiceClient blobServiceClient = new BlobServiceClientBuilder() + BlobServiceClientBuilder builder = new BlobServiceClientBuilder() .endpoint(getBlobEndpoint()) .addPolicy(azureHttpRequestLoggingTestingPolicy) - .connectionString(("DefaultEndpointsProtocol=http;" + accountName + ";" + accountKey + ";" + blobEndpoint)) - .retryOptions(retryOptions) - .buildClient(); + .connectionString(("DefaultEndpointsProtocol=http;" + accountName + ";" + accountKey + ";" + blobEndpoint)); + if (retryOptions != null) { + builder.retryOptions(retryOptions); + } + + BlobServiceClient blobServiceClient = builder.buildClient(); return blobServiceClient.getBlobContainerClient(containerName); } diff 
--git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixture.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixture.java index 4229ba3b001..deedd7b1758 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixture.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/fixture/SegmentAzureFixture.java @@ -61,26 +61,16 @@ public NodeStore createNodeStore() { String endpoint = String.format("https://%s.blob.core.windows.net", containerName); RequestRetryOptions retryOptions = AzureRequestOptions.getRetryOptionsDefault(); - BlobServiceClient blobServiceClient = new BlobServiceClientBuilder() - .endpoint(endpoint) - .connectionString(AZURE_CONNECTION_STRING) - .retryOptions(retryOptions) - .buildClient(); - - BlobContainerClient reaBlobContainerClient = blobServiceClient.getBlobContainerClient(containerName); + BlobContainerClient readBlobContainerClient = getBlobContainerClient(retryOptions, endpoint, containerName); RequestRetryOptions writeRetryOptions = AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations(); - BlobServiceClient writeBlobServiceClient = new BlobServiceClientBuilder() - .endpoint(endpoint) - .connectionString(AZURE_CONNECTION_STRING) - .retryOptions(writeRetryOptions) - .buildClient(); - - writeBlobContainerClient = writeBlobServiceClient.getBlobContainerClient(containerName); + writeBlobContainerClient = getBlobContainerClient(writeRetryOptions, endpoint, containerName); writeBlobContainerClient.createIfNotExists(); - persistence = new AzurePersistence(reaBlobContainerClient, writeBlobContainerClient, AZURE_ROOT_PATH); + BlobContainerClient noRetryBlobContainerClient = getBlobContainerClient(null, endpoint, containerName); + + persistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, AZURE_ROOT_PATH); } catch (BlobStorageException e) { throw new RuntimeException(e); } @@ -115,4 +105,17 @@ public void dispose(NodeStore nodeStore) { public String toString() { return "SegmentAzure"; } + + private BlobContainerClient getBlobContainerClient(RequestRetryOptions retryOptions, String endpoint, String containerName) { + BlobServiceClientBuilder builder = new BlobServiceClientBuilder() + .endpoint(endpoint) + .connectionString(AZURE_CONNECTION_STRING); + if (retryOptions != null) { + builder.retryOptions(retryOptions); + } + + BlobServiceClient blobServiceClient = builder.buildClient(); + + return blobServiceClient.getBlobContainerClient(containerName); + } } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureJournalReaderTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureJournalReaderTest.java index a7dea9cc474..5a72b29dceb 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureJournalReaderTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureJournalReaderTest.java @@ -30,7 +30,6 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.net.URISyntaxException; -import java.nio.charset.StandardCharsets; import java.security.InvalidKeyException; public class AzureJournalReaderTest extends JournalReaderTest {
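The net effect of the fixture change is easiest to see end to end: one connection string now yields three container clients with different retry profiles, and all three are handed to AzurePersistence. A condensed sketch of that wiring, reusing the getBlobContainerClient helper introduced above (endpoint, containerName and AZURE_CONNECTION_STRING as configured in the fixture):

    import com.azure.storage.blob.BlobContainerClient;

    // Sketch only: three views of the same container, differing in retry behaviour.
    BlobContainerClient readClient = getBlobContainerClient(AzureRequestOptions.getRetryOptionsDefault(), endpoint, containerName);
    BlobContainerClient writeClient = getBlobContainerClient(AzureRequestOptions.getRetryOperationsOptimiseForWriteOperations(), endpoint, containerName);
    BlobContainerClient noRetryClient = getBlobContainerClient(null, endpoint, containerName); // no custom retry options applied

    writeClient.createIfNotExists();
    AzurePersistence persistence = new AzurePersistence(readClient, writeClient, noRetryClient, AZURE_ROOT_PATH);

diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureTarRevisionsTest.java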
b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureTarRevisionsTest.java index f49fb889b7c..03c635d0551 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureTarRevisionsTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/journal/AzureTarRevisionsTest.java @@ -34,18 +34,20 @@ public class AzureTarRevisionsTest extends TarRevisionsTest { private BlobContainerClient readBlobContainerClient; private BlobContainerClient writeBlobContainerClient; + private BlobContainerClient noRetryBlobContainerClient; @Before public void setup() throws Exception { readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); super.setup(); } @Override protected SegmentNodeStorePersistence getPersistence() throws IOException { try { - return new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, "oak"); + return new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient, "oak"); } catch (BlobStorageException e) { throw new IOException(e); } diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java index 1397bfcb8c8..4cdab554dcb 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceBlobTest.java @@ -79,9 +79,10 @@ public class SplitPersistenceBlobTest { public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, BlobStorageException { BlobContainerClient readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); BlobContainerClient writeBlobContainerClient = azurite.getWriteBlobContainerClient("oak-test"); + BlobContainerClient noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); SegmentNodeStorePersistence sharedPersistence = - new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); + new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient,"oak"); File dataStoreDir = new File(folder.getRoot(), "blobstore"); BlobStore blobStore = newBlobStore(dataStoreDir); diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java index 6b60bc5f7c7..49c0ac6ae87 100644 --- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java +++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/spi/persistence/split/SplitPersistenceTest.java @@ -74,8 +74,9 @@ public class SplitPersistenceTest { public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, BlobStorageException { BlobContainerClient readBlobContainerClient = azurite.getReadBlobContainerClient("oak-test"); BlobContainerClient writeBlobContainerClient = 
azurite.getWriteBlobContainerClient("oak-test"); + BlobContainerClient noRetryBlobContainerClient = azurite.getNoRetryBlobContainerClient("oak-test"); - SegmentNodeStorePersistence sharedPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient,"oak"); + SegmentNodeStorePersistence sharedPersistence = new AzurePersistence(readBlobContainerClient, writeBlobContainerClient, noRetryBlobContainerClient,"oak"); baseFileStore = FileStoreBuilder .fileStoreBuilder(folder.newFolder()) From 84180d8007b70d1562f3c7c32ea6db093ccb3dac Mon Sep 17 00:00:00 2001 From: ierandra Date: Wed, 20 Nov 2024 16:25:48 +0200 Subject: [PATCH 15/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - PR merge --- .../azure/v8/AzureRepositoryLockV8.java | 117 +++++++++++------- 1 file changed, 73 insertions(+), 44 deletions(-) diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8.java index c957af7236f..0164a82a4d1 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureRepositoryLockV8.java @@ -31,9 +31,6 @@ import java.io.IOException; import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; public class AzureRepositoryLockV8 implements RepositoryLock { @@ -55,7 +52,11 @@ public class AzureRepositoryLockV8 implements RepositoryLock { private final CloudBlockBlob blob; - private final ExecutorService executor; + private final Thread refresherThread; + + private static final String REFRESHER_THREAD_NAME = "AzureRepositoryLock-Refresher"; + + private boolean inError; private final int timeoutSec; @@ -72,7 +73,8 @@ public AzureRepositoryLockV8(CloudBlockBlob blob, Runnable shutdownHook, WriteAc public AzureRepositoryLockV8(CloudBlockBlob blob, Runnable shutdownHook, WriteAccessController writeAccessController, int timeoutSec) { this.shutdownHook = shutdownHook; this.blob = blob; - this.executor = Executors.newSingleThreadExecutor(); + this.refresherThread = new Thread(this::refreshLease, REFRESHER_THREAD_NAME); + this.refresherThread.setDaemon(true); this.timeoutSec = timeoutSec; this.writeAccessController = writeAccessController; @@ -116,67 +118,77 @@ public AzureRepositoryLockV8 lock() throws IOException { log.error("Can't acquire the lease in {}s.", timeoutSec); throw new IOException(ex); } else { - executor.submit(this::refreshLease); + refresherThread.start(); return this; } } private void refreshLease() { + log.info("Starting the lease renewal loop"); doUpdate = true; long lastUpdate = 0; + setInError(false); while (doUpdate) { - long timeSinceLastUpdate = (System.currentTimeMillis() - lastUpdate) / 1000; try { - if (timeSinceLastUpdate > renewalInterval) { - - BlobRequestOptions requestOptions = new BlobRequestOptions(); - requestOptions.setMaximumExecutionTimeInMs(LEASE_RENEWAL_TIMEOUT_MS); - requestOptions.setRetryPolicyFactory(new RetryNoRetry()); - blob.renewLease(AccessCondition.generateLeaseCondition(leaseId), requestOptions, null); - - writeAccessController.enableWriting(); - lastUpdate = System.currentTimeMillis(); - } - } catch (Exception e) { - timeSinceLastUpdate = (System.currentTimeMillis() - lastUpdate) / 1000; + long timeSinceLastUpdate = (System.currentTimeMillis() - lastUpdate) 
/ 1000; + try { + if (timeSinceLastUpdate > renewalInterval) { + + BlobRequestOptions requestOptions = new BlobRequestOptions(); + requestOptions.setMaximumExecutionTimeInMs(LEASE_RENEWAL_TIMEOUT_MS); + requestOptions.setRetryPolicyFactory(new RetryNoRetry()); + blob.renewLease(AccessCondition.generateLeaseCondition(leaseId), requestOptions, null); + + writeAccessController.enableWriting(); + if (isInError()) { + log.info("Lease renewal successful again."); + setInError(false); + } + lastUpdate = System.currentTimeMillis(); + } + } catch (Exception e) { + timeSinceLastUpdate = (System.currentTimeMillis() - lastUpdate) / 1000; - if (timeSinceLastUpdate > timeToWaitBeforeWriteBlock) { - writeAccessController.disableWriting(); - } + if (timeSinceLastUpdate > timeToWaitBeforeWriteBlock) { + writeAccessController.disableWriting(); + } - if (e instanceof StorageException) { - StorageException storageException = (StorageException) e; - if (Set.of(StorageErrorCodeStrings.OPERATION_TIMED_OUT - , StorageErrorCode.SERVICE_INTERNAL_ERROR - , StorageErrorCodeStrings.SERVER_BUSY - , StorageErrorCodeStrings.INTERNAL_ERROR).contains(storageException.getErrorCode())) { - log.warn("Could not renew the lease due to the operation timeout or service unavailability. Retry in progress ...", e); - } else if (storageException.getHttpStatusCode() == Constants.HeaderConstants.HTTP_UNUSED_306) { - log.warn("Client side error. Retry in progress ...", e); + if (e instanceof StorageException) { + StorageException storageException = (StorageException) e; + if (Set.of(StorageErrorCodeStrings.OPERATION_TIMED_OUT + , StorageErrorCode.SERVICE_INTERNAL_ERROR + , StorageErrorCodeStrings.SERVER_BUSY + , StorageErrorCodeStrings.INTERNAL_ERROR).contains(storageException.getErrorCode())) { + log.warn("Could not renew the lease due to the operation timeout or service unavailability. Retry in progress ...", e); + } else if (storageException.getHttpStatusCode() == Constants.HeaderConstants.HTTP_UNUSED_306) { + log.warn("Client side error. Retry in progress ...", e); + } else { + log.warn("Could not renew lease due to storage exception. Retry in progress ... ", e); + } } else { - log.warn("Could not renew lease due to storage exception. Retry in progress ... ", e); + log.error("Can't renew the lease", e); + shutdownHook.run(); + doUpdate = false; + return; } - } else { - log.error("Can't renew the lease", e); - shutdownHook.run(); - doUpdate = false; - return; } - } - try { - Thread.sleep(100); - } catch (InterruptedException e) { - log.error("Interrupted the lease renewal loop", e); + waitABit(100); + } catch (Throwable t) { + if (!isInError()) { + log.error("Unexpected error in the lease renewal loop, trying to recover", t); + setInError(true); + } + waitABit(100); } } + log.info("Lease renewal loop exiting."); } @Override public void unlock() throws IOException { doUpdate = false; - executor.shutdown(); try { - executor.awaitTermination(1, TimeUnit.MINUTES); + refresherThread.join(60000); } catch (InterruptedException e) { throw new IOException(e); } finally { @@ -194,4 +206,21 @@ private void releaseLease() throws IOException { throw new IOException(e); } } + + private void setInError(boolean inError) { + this.inError = inError; + refresherThread.setName(REFRESHER_THREAD_NAME + (inError ? 
"-InError" : "")); + } + + private boolean isInError() { + return inError; + } + + private void waitABit(long millis) { + try { + Thread.sleep(millis); + } catch (InterruptedException e) { + // ignore + } + } } From 871294b437ca4a2f688f58b7ac2ced8c31b7dcab Mon Sep 17 00:00:00 2001 From: ierandra Date: Thu, 21 Nov 2024 15:36:21 +0200 Subject: [PATCH 16/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - PR merge --- .../azure/util/AzureRequestOptions.java | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java index 8aec048eddd..1846f6d86b2 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java @@ -49,17 +49,17 @@ public static RequestRetryOptions getRetryOptionsDefault() { } public static RequestRetryOptions getRetryOptionsDefault(String secondaryHost) { - int retryAttempts = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS); - int timeoutExecution = Integer.getInteger(TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION); + int maxTries = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS); + int tryTimeoutInSeconds = Integer.getInteger(TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION); int timeoutInterval = Integer.getInteger(TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL); - long timeoutIntervalToMs = timeoutInterval * 1_000L; - long timeoutIntervalMax = timeoutIntervalToMs * 5; + long retryDelayInMs = timeoutInterval * 1_000L; + long maxRetryDelayInMs = retryDelayInMs * 5; return new RequestRetryOptions(RetryPolicyType.FIXED, - retryAttempts, - timeoutExecution, - timeoutIntervalToMs, - timeoutIntervalMax, + maxTries, + tryTimeoutInSeconds, + retryDelayInMs, + maxRetryDelayInMs, secondaryHost); } @@ -68,17 +68,17 @@ public static RequestRetryOptions getRetryOptionsDefault(String secondaryHost) { * @return */ public static RequestRetryOptions getRetryOperationsOptimiseForWriteOperations() { - int retryAttempts = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS); - Integer writeTimeoutExecution = Integer.getInteger(WRITE_TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION); - Integer writeTimeoutInterval = Integer.getInteger(WRITE_TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL); - long writeTimeoutIntervalToMs = writeTimeoutInterval * 1_000L; - long writeTimeoutIntervalMax = writeTimeoutIntervalToMs * 5; + int maxTries = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS); + Integer tryTimeoutInSeconds = Integer.getInteger(WRITE_TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION); + Integer writeTimeoutIntervalInSeconds = Integer.getInteger(WRITE_TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL); + long retryDelayInMs = writeTimeoutIntervalInSeconds * 1_000L; + long maxRetryDelayInMs = retryDelayInMs * 5; return new RequestRetryOptions(RetryPolicyType.FIXED, - retryAttempts, - writeTimeoutExecution, - writeTimeoutIntervalToMs, - writeTimeoutIntervalMax, + maxTries, + tryTimeoutInSeconds, + retryDelayInMs, + maxRetryDelayInMs, null); } From d236a3ca931111ade4c01c6ea74e0fc6c4b82ec8 Mon Sep 17 00:00:00 2001 From: ierandra Date: Tue, 26 Nov 2024 12:14:49 +0200 Subject: [PATCH 17/19] OAK-8413 Use the new Azure SDK in the Azure Segment 
From d236a3ca931111ade4c01c6ea74e0fc6c4b82ec8 Mon Sep 17 00:00:00 2001
From: ierandra
Date: Tue, 26 Nov 2024 12:14:49 +0200
Subject: [PATCH 17/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - PR review

---
 .../azure/util/AzureRequestOptions.java | 24 +++++++++----------
 1 file changed, 11 insertions(+), 13 deletions(-)

diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java
index 1846f6d86b2..14a3542468f 100644
--- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java
+++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/util/AzureRequestOptions.java
@@ -30,14 +30,9 @@ public class AzureRequestOptions {
     static final String RETRY_BACKOFF_PROP = "segment.azure.retry.backoff";
     static final int DEFAULT_RETRY_BACKOFF_SECONDS = 5;
 
-    static final String TIMEOUT_EXECUTION_PROP = "segment.timeout.execution";
-    static final int DEFAULT_TIMEOUT_EXECUTION = 30;
-
     static final String TIMEOUT_INTERVAL_PROP = "segment.timeout.interval";
     static final int DEFAULT_TIMEOUT_INTERVAL = 1;
 
-    static final String WRITE_TIMEOUT_EXECUTION_PROP = "segment.write.timeout.execution";
-
     static final String WRITE_TIMEOUT_INTERVAL_PROP = "segment.write.timeout.interval";
 
     private AzureRequestOptions() {
@@ -50,10 +45,9 @@ public static RequestRetryOptions getRetryOptionsDefault() {
 
     public static RequestRetryOptions getRetryOptionsDefault(String secondaryHost) {
         int maxTries = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS);
-        int tryTimeoutInSeconds = Integer.getInteger(TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION);
-        int timeoutInterval = Integer.getInteger(TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL);
-        long retryDelayInMs = timeoutInterval * 1_000L;
-        long maxRetryDelayInMs = retryDelayInMs * 5;
+        int tryTimeoutInSeconds = getReadTryTimeoutInSeconds();
+        long retryDelayInMs = Integer.getInteger(RETRY_BACKOFF_PROP, DEFAULT_RETRY_BACKOFF_SECONDS) * 1_000L;
+        long maxRetryDelayInMs = retryDelayInMs;
 
         return new RequestRetryOptions(RetryPolicyType.FIXED,
                 maxTries,
@@ -69,10 +63,10 @@ public static RequestRetryOptions getRetryOptionsDefault(String secondaryHost)
      */
     public static RequestRetryOptions getRetryOperationsOptimiseForWriteOperations() {
         int maxTries = Integer.getInteger(RETRY_ATTEMPTS_PROP, DEFAULT_RETRY_ATTEMPTS);
-        Integer tryTimeoutInSeconds = Integer.getInteger(WRITE_TIMEOUT_EXECUTION_PROP, DEFAULT_TIMEOUT_EXECUTION);
-        Integer writeTimeoutIntervalInSeconds = Integer.getInteger(WRITE_TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL);
-        long retryDelayInMs = writeTimeoutIntervalInSeconds * 1_000L;
-        long maxRetryDelayInMs = retryDelayInMs * 5;
+        // if the value for write is not set, use the read value
+        int tryTimeoutInSeconds = Integer.getInteger(WRITE_TIMEOUT_INTERVAL_PROP, getReadTryTimeoutInSeconds());
+        long retryDelayInMs = Integer.getInteger(WRITE_TIMEOUT_INTERVAL_PROP, DEFAULT_RETRY_BACKOFF_SECONDS) * 1_000L;
+        long maxRetryDelayInMs = retryDelayInMs;
 
         return new RequestRetryOptions(RetryPolicyType.FIXED,
                 maxTries,
@@ -82,4 +76,8 @@ public static RequestRetryOptions getRetryOperationsOptimiseForWriteOperations(
                 null);
     }
 
+    private static int getReadTryTimeoutInSeconds() {
+        return Integer.getInteger(TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL);
+    }
+
 }
\ No newline at end of file
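The consolidation above leans on `Integer.getInteger(name, default)`, a JDK helper that reads a system property as an integer and falls back to the supplied default; chaining two calls is what gives the write path its read-path fallback. A minimal sketch, assuming the same property names as the patch:

```java
// Run with e.g. -Dsegment.timeout.interval=5 to see the fallback in action.
public class PropertyFallbackExample {
    static final String TIMEOUT_INTERVAL_PROP = "segment.timeout.interval";
    static final String WRITE_TIMEOUT_INTERVAL_PROP = "segment.write.timeout.interval";
    static final int DEFAULT_TIMEOUT_INTERVAL = 1;

    public static void main(String[] args) {
        // read value: the property if set, otherwise the compile-time default
        int readTryTimeout = Integer.getInteger(TIMEOUT_INTERVAL_PROP, DEFAULT_TIMEOUT_INTERVAL);
        // write value: its own property if set, otherwise the read value
        int writeTryTimeout = Integer.getInteger(WRITE_TIMEOUT_INTERVAL_PROP, readTryTimeout);
        System.out.println("read=" + readTryTimeout + "s, write=" + writeTryTimeout + "s");
    }
}
```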
From 6d5da6965b408dadd82807468ec51ccd152abb63 Mon Sep 17 00:00:00 2001
From: ierandra
Date: Thu, 19 Dec 2024 11:07:50 +0200
Subject: [PATCH 18/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - fix tests

---
 .../azure/AzureSegmentArchiveWriterTest.java | 26 +++++++++++++------
 .../v8/AzureSegmentArchiveWriterV8Test.java | 22 +++++++++++++---
 2 files changed, 36 insertions(+), 12 deletions(-)

diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java
index 14c69d893ca..b2a12f039b4 100644
--- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriterTest.java
@@ -30,10 +30,7 @@
 import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager;
 import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter;
 import org.jetbrains.annotations.NotNull;
-import org.junit.Before;
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
+import org.junit.*;
 import org.mockserver.client.MockServerClient;
 import org.mockserver.junit.MockServerRule;
 import org.mockserver.matchers.Times;
@@ -53,6 +50,10 @@ public class AzureSegmentArchiveWriterTest {
     public static final String BASE_PATH = "/devstoreaccount1/oak-test";
 
     public static final int MAX_ATTEMPTS = 3;
+    private static final String RETRY_ATTEMPTS = "segment.azure.retry.attempts";
+    private static final String TIMEOUT_EXECUTION = "segment.timeout.execution";
+    private static final String RETRY_INTERVAL_MS = "azure.segment.archive.writer.retries.intervalMs";
+    private static final String WRITE_RETRY_ATTEMPTS = "azure.segment.archive.writer.retries.max";
 
     @Rule
     public MockServerRule mockServerRule = new MockServerRule(this);
@@ -66,12 +67,21 @@ public class AzureSegmentArchiveWriterTest {
     @Before
     public void setUp() throws Exception {
         mockServerClient = new MockServerClient("localhost", mockServerRule.getPort());
-        System.setProperty("azure.segment.archive.writer.retries.intervalMs", "100");
-        System.setProperty("azure.segment.archive.writer.retries.max", Integer.toString(MAX_ATTEMPTS));
+        System.setProperty(RETRY_INTERVAL_MS, "100");
+        System.setProperty(WRITE_RETRY_ATTEMPTS, Integer.toString(MAX_ATTEMPTS));
 
         // Disable Azure SDK own retry mechanism used by AzureSegmentArchiveWriter
-        System.setProperty("segment.azure.retry.attempts", "1");
-        System.setProperty("segment.timeout.execution", "1");
+        System.setProperty(RETRY_ATTEMPTS, "1");
+        System.setProperty(TIMEOUT_EXECUTION, "1");
+    }
+
+    @AfterClass
+    public static void setDown() {
+        // Reset the values for the properties set in setUp(); otherwise these will apply to all the tests that are executed afterwards
+        System.clearProperty(RETRY_ATTEMPTS);
+        System.clearProperty(TIMEOUT_EXECUTION);
+        System.clearProperty(RETRY_INTERVAL_MS);
+        System.clearProperty(WRITE_RETRY_ATTEMPTS);
     }
 
     @Test
diff --git a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java
index 2a46b309a5c..e659701cdbd 100644
--- a/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java
+++ b/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/azure/v8/AzureSegmentArchiveWriterV8Test.java
@@ -27,6 +27,7 @@
 import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveManager;
 import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveWriter;
 import org.jetbrains.annotations.NotNull;
+import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -50,6 +51,10 @@ public class AzureSegmentArchiveWriterV8Test {
     public static final String BASE_PATH = "/devstoreaccount1/oak-test";
 
     public static final int MAX_ATTEMPTS = 3;
+    private static final String RETRY_ATTEMPTS = "segment.azure.retry.attempts";
+    private static final String TIMEOUT_EXECUTION = "segment.timeout.execution";
+    private static final String RETRY_INTERVAL_MS = "azure.segment.archive.writer.retries.intervalMs";
+    private static final String WRITE_RETRY_ATTEMPTS = "azure.segment.archive.writer.retries.max";
 
     @Rule
     public MockServerRule mockServerRule = new MockServerRule(this);
@@ -63,12 +68,21 @@ public class AzureSegmentArchiveWriterV8Test {
     public void setUp() throws Exception {
         container = createCloudBlobContainer();
 
-        System.setProperty("azure.segment.archive.writer.retries.intervalMs", "100");
-        System.setProperty("azure.segment.archive.writer.retries.max", Integer.toString(MAX_ATTEMPTS));
+        System.setProperty(RETRY_INTERVAL_MS, "100");
+        System.setProperty(WRITE_RETRY_ATTEMPTS, Integer.toString(MAX_ATTEMPTS));
 
         // Disable Azure SDK own retry mechanism used by AzureSegmentArchiveWriter
-        System.setProperty("segment.azure.retry.attempts", "0");
-        System.setProperty("segment.timeout.execution", "1");
+        System.setProperty(RETRY_ATTEMPTS, "0");
+        System.setProperty(TIMEOUT_EXECUTION, "1");
+    }
+
+    @AfterClass
+    public static void setDown() {
+        // Reset the values for the properties set in setUp(); otherwise these will apply to all the tests that are executed afterwards
+        System.clearProperty(RETRY_ATTEMPTS);
+        System.clearProperty(TIMEOUT_EXECUTION);
+        System.clearProperty(RETRY_INTERVAL_MS);
+        System.clearProperty(WRITE_RETRY_ATTEMPTS);
     }
 
     @Test
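The `@AfterClass` cleanup added above guards against a classic JUnit pitfall: system properties set in one test class stay set for the entire JVM run and silently leak into every class executed afterwards. A stripped-down sketch of the set-then-clear pattern; the property name is reused from the patch purely for illustration:

```java
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;

public class PropertyHygieneTest {
    private static final String RETRY_ATTEMPTS = "segment.azure.retry.attempts";

    @Before
    public void setUp() {
        // Tune the property for this class only
        System.setProperty(RETRY_ATTEMPTS, "1");
    }

    @AfterClass
    public static void tearDownClass() {
        // Without this, the tuned value would remain visible to later test classes
        System.clearProperty(RETRY_ATTEMPTS);
    }

    @Test
    public void readsTunedValue() {
        assertEquals(1, (int) Integer.getInteger(RETRY_ATTEMPTS, 5));
    }
}
```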
From bf3b7cc05aaf26ad246b2263dfaa544ee035a009 Mon Sep 17 00:00:00 2001
From: ierandra
Date: Thu, 19 Dec 2024 18:44:22 +0200
Subject: [PATCH 19/19] OAK-8413 Use the new Azure SDK in the Azure Segment Store - fix tests

---
 oak-segment-azure/pom.xml | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/oak-segment-azure/pom.xml b/oak-segment-azure/pom.xml
index 9f3cd85f7de..17196b7c21a 100644
--- a/oak-segment-azure/pom.xml
+++ b/oak-segment-azure/pom.xml
@@ -63,6 +63,7 @@
                             io.netty.channel.epoll;resolution:=optional,
                             io.netty.handler.codec.*;resolution:=optional,
                             org.objectweb.asm;resolution:=optional,
+                            org.codehaus.stax2;resolution:=optional,
                             !org.apache.jackrabbit.oak.segment*,
                             !com.google.*,
                             !android.os,
@@ -108,7 +109,8 @@
                             json-smart,
                             content-type,
                             accessors-smart,
-                            nimbus-jose-jwt
+                            nimbus-jose-jwt,
+                            stax2-api
                         </Embed-Dependency>
                         <_plugin />
@@ -199,6 +201,11 @@
     <dependency>
       <groupId>com.microsoft.azure</groupId>
      <artifactId>azure-keyvault-core</artifactId>
    </dependency>
+    <dependency>
+      <groupId>org.codehaus.woodstox</groupId>
+      <artifactId>stax2-api</artifactId>
+      <version>4.2.2</version>
+    </dependency>
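The final patch embeds `stax2-api` and declares `org.codehaus.stax2` as an optional OSGi import, since the XML layer pulled in by the new Azure SDK resolves against the StAX2 API at runtime. A quick, hypothetical way to sanity-check that the embedded classes resolve, assuming `org.codehaus.stax2.XMLInputFactory2` (the stax2-api entry point) is on the classpath:

```java
// Illustrative check only; fails with ClassNotFoundException if stax2-api is missing.
public class Stax2PresenceCheck {
    public static void main(String[] args) throws Exception {
        Class<?> factory = Class.forName("org.codehaus.stax2.XMLInputFactory2");
        System.out.println("stax2-api present: " + factory.getName());
    }
}
```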