From fa66bebf88d6fc69765d5ecb26930ece94477024 Mon Sep 17 00:00:00 2001 From: Rishabh Maurya Date: Mon, 2 Oct 2023 13:31:02 -0700 Subject: [PATCH 01/12] Use of LogByteSizeMergePolicy for data stream use cases (#9992) * Configurable merge policy for index * additional setting to configure merge policy for timestamp based index * introduction of logbytesize merge policy as an option Signed-off-by: Rishabh Maurya * remove the trace log not required anymore Signed-off-by: Rishabh Maurya * Refactor the merge policy extraction logic Signed-off-by: Rishabh Maurya * Rename constant DEFAULT to DEFAULT_POLICY Signed-off-by: Rishabh Maurya * Simplify merge policy extraction and selection logic Signed-off-by: Rishabh Maurya * missing javadoc error Signed-off-by: Rishabh Maurya * Renaming log byte size policy setting with mb Signed-off-by: Rishabh Maurya * Move validation exception to enum from setting defn Signed-off-by: Rishabh Maurya * rename time_index to time_series_index Signed-off-by: Rishabh Maurya --------- Signed-off-by: Rishabh Maurya --- CHANGELOG.md | 1 + .../gateway/RecoveryFromGatewayIT.java | 4 +- .../RemoveCorruptedShardDataCommandIT.java | 4 +- .../index/store/CorruptedFileIT.java | 10 +- .../indices/stats/IndexStatsIT.java | 10 +- .../java/org/opensearch/update/UpdateIT.java | 4 +- .../common/settings/ClusterSettings.java | 1 + .../common/settings/IndexScopedSettings.java | 29 +- .../org/opensearch/index/IndexSettings.java | 156 ++++++- .../index/LogByteSizeMergePolicyProvider.java | 166 +++++++ .../opensearch/index/MergePolicyProvider.java | 31 ++ ...ig.java => TieredMergePolicyProvider.java} | 84 ++-- .../opensearch/index/shard/IndexShard.java | 2 +- .../segments/IndicesSegmentsRequestTests.java | 4 +- .../index/MergePolicySettingsTests.java | 421 +++++++++++++++--- .../index/MergeSchedulerSettingsTests.java | 8 +- .../RemoveCorruptedShardDataCommandTests.java | 4 +- .../indices/recovery/RecoveryTests.java | 4 +- .../test/OpenSearchIntegTestCase.java | 4 +- 19 files changed, 776 insertions(+), 171 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java create mode 100644 server/src/main/java/org/opensearch/index/MergePolicyProvider.java rename server/src/main/java/org/opensearch/index/{MergePolicyConfig.java => TieredMergePolicyProvider.java} (82%) diff --git a/CHANGELOG.md b/CHANGELOG.md index e1834df696769..7519e242c0acf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) - Implement Visitor Design pattern in QueryBuilder to enable the capability to traverse through the complex QueryBuilder tree. 
([#10110](https://github.com/opensearch-project/OpenSearch/pull/10110)) - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) +- Configurable merge policy for index with an option to choose from LogByteSize and Tiered merge policy ([#9992](https://github.com/opensearch-project/OpenSearch/pull/9992)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index 2bab61f3e1c4c..229cd7bffad2f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -53,7 +53,7 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.Engine; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.shard.ShardPath; @@ -519,7 +519,7 @@ public void testReuseInFileBasedPeerRecovery() throws Exception { .put("number_of_replicas", 1) // disable merges to keep segments the same - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // expire retention leases quickly .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java index f8c2acbf99f70..b431079476624 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -73,7 +73,7 @@ import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.GatewayMetaState; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.MockEngineFactoryPlugin; import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.translog.TestTranslog; @@ -135,7 +135,7 @@ public void testCorruptIndex() throws Exception { Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1") .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "checksum") diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java index 7e1d0792e3ddb..8291fef5d177b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java @@ -72,7 +72,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; -import 
org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; @@ -167,7 +167,7 @@ public void testCorruptFileAndRecover() throws ExecutionException, InterruptedEx Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // no checkindex - we corrupt shards on purpose .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no translog based flush - it might change the .liv / segments.N files @@ -286,7 +286,7 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on // purpose // no translog based flush - it might change the .liv / segments.N files @@ -552,7 +552,7 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // no checkindex - we corrupt shards on purpose .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no translog based flush - it might change the .liv / segments.N files @@ -624,7 +624,7 @@ public void testReplicaCorruption() throws Exception { prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, cluster().numDataNodes() - 1) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on // purpose .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) // no diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java index a0f01acd1f8e9..0967acb37d3e8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java @@ -66,8 +66,8 @@ import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; import org.opensearch.index.MergeSchedulerConfig; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.VersionType; import org.opensearch.index.cache.query.QueryCacheStats; import org.opensearch.index.engine.VersionConflictEngineException; @@ -589,8 +589,8 @@ public void testNonThrottleStats() throws Exception { prepareCreate("test").setSettings( settingsBuilder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - 
.put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10000") ) @@ -621,8 +621,8 @@ public void testThrottleStats() throws Exception { prepareCreate("test").setSettings( settingsBuilder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "1") .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC.name()) diff --git a/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java index 442268d513fc3..b46d27bafb2a5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java @@ -50,7 +50,7 @@ import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.action.ActionListener; import org.opensearch.core.rest.RestStatus; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.DocumentMissingException; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.plugins.Plugin; @@ -669,7 +669,7 @@ public void run() { public void testStressUpdateDeleteConcurrency() throws Exception { // We create an index with merging disabled so that deletes don't get merged away - assertAcked(prepareCreate("test").setSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false))); + assertAcked(prepareCreate("test").setSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false))); ensureGreen(); Script fieldIncScript = new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, Collections.singletonMap("field", "field")); diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 032027384f106..5261d40387dc6 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -457,6 +457,7 @@ public void apply(Settings value, Settings current, Settings previous) { NetworkService.TCP_CONNECT_TIMEOUT, IndexSettings.QUERY_STRING_ANALYZE_WILDCARD, IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD, + IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY, ScriptService.SCRIPT_GENERAL_CACHE_SIZE_SETTING, ScriptService.SCRIPT_GENERAL_CACHE_EXPIRE_SETTING, ScriptService.SCRIPT_GENERAL_MAX_COMPILATIONS_RATE_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 
5b2afc44600bd..83bf8c82ee3dd 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -45,9 +45,11 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexSortConfig; import org.opensearch.index.IndexingSlowLog; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.LogByteSizeMergePolicyProvider; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.MergeSchedulerConfig; import org.opensearch.index.SearchSlowLog; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.cache.bitset.BitsetFilterCache; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.fielddata.IndexFieldDataService; @@ -120,14 +122,14 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING, - MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, IndexSortConfig.INDEX_SORT_FIELD_SETTING, IndexSortConfig.INDEX_SORT_ORDER_SETTING, IndexSortConfig.INDEX_SORT_MISSING_SETTING, @@ -202,6 +204,13 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED, IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME, IndexSettings.INDEX_MERGE_ON_FLUSH_POLICY, + IndexSettings.INDEX_MERGE_POLICY, + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING, IndexSettings.DEFAULT_SEARCH_PIPELINE, // Settings for Searchable Snapshots @@ -275,7 +284,7 @@ public boolean isPrivateSetting(String key) { case IndexMetadata.SETTING_HISTORY_UUID: case IndexMetadata.SETTING_VERSION_UPGRADED: case IndexMetadata.SETTING_INDEX_PROVIDED_NAME: - case MergePolicyConfig.INDEX_MERGE_ENABLED: + case MergePolicyProvider.INDEX_MERGE_ENABLED: // we keep the shrink settings for BWC - this can be removed in 8.0 // we can't remove in 7 since this setting 
might be baked into an index coming in via a full cluster restart from 6.0 case "index.shrink.source.uuid": diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 1e4224c314f05..ce6c1a5ad6284 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -54,6 +54,7 @@ import org.opensearch.node.Node; import org.opensearch.search.pipeline.SearchPipelineService; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -83,9 +84,42 @@ */ @PublicApi(since = "1.0.0") public final class IndexSettings { - private static final String MERGE_ON_FLUSH_DEFAULT_POLICY = "default"; + private static final String DEFAULT_POLICY = "default"; private static final String MERGE_ON_FLUSH_MERGE_POLICY = "merge-on-flush"; + /** + * Enum representing supported merge policies + */ + public enum IndexMergePolicy { + TIERED("tiered"), + LOG_BYTE_SIZE("log_byte_size"), + DEFAULT_POLICY(IndexSettings.DEFAULT_POLICY); + + private final String value; + + IndexMergePolicy(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + public static IndexMergePolicy fromString(String text) { + for (IndexMergePolicy policy : IndexMergePolicy.values()) { + if (policy.value.equals(text)) { + return policy; + } + } + throw new IllegalArgumentException( + "The setting has unsupported policy specified: " + + text + + ". Please use one of: " + + String.join(", ", Arrays.stream(IndexMergePolicy.values()).map(IndexMergePolicy::getValue).toArray(String[]::new)) + ); + } + } + public static final Setting> DEFAULT_FIELD_SETTING = Setting.listSetting( "index.query.default_field", Collections.singletonList("*"), @@ -566,11 +600,25 @@ public final class IndexSettings { public static final Setting INDEX_MERGE_ON_FLUSH_POLICY = Setting.simpleString( "index.merge_on_flush.policy", - MERGE_ON_FLUSH_DEFAULT_POLICY, + DEFAULT_POLICY, Property.IndexScope, Property.Dynamic ); + public static final Setting INDEX_MERGE_POLICY = Setting.simpleString( + "index.merge.policy", + DEFAULT_POLICY, + IndexMergePolicy::fromString, + Property.IndexScope + ); + + public static final Setting TIME_SERIES_INDEX_MERGE_POLICY = Setting.simpleString( + "indices.time_series_index.default_index_merge_policy", + DEFAULT_POLICY, + IndexMergePolicy::fromString, + Property.NodeScope + ); + public static final Setting SEARCHABLE_SNAPSHOT_REPOSITORY = Setting.simpleString( "index.searchable_snapshot.repository", Property.IndexScope, @@ -651,7 +699,8 @@ public final class IndexSettings { private volatile ByteSizeValue generationThresholdSize; private volatile ByteSizeValue flushAfterMergeThresholdSize; private final MergeSchedulerConfig mergeSchedulerConfig; - private final MergePolicyConfig mergePolicyConfig; + private final TieredMergePolicyProvider tieredMergePolicyProvider; + private final LogByteSizeMergePolicyProvider logByteSizeMergePolicyProvider; private final IndexSortConfig indexSortConfig; private final IndexScopedSettings scopedSettings; private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis(); @@ -844,7 +893,8 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti maxAnalyzedOffset = scopedSettings.get(MAX_ANALYZED_OFFSET_SETTING); maxTermsCount = scopedSettings.get(MAX_TERMS_COUNT_SETTING); maxRegexLength = scopedSettings.get(MAX_REGEX_LENGTH_SETTING); - this.mergePolicyConfig 
= new MergePolicyConfig(logger, this); + this.tieredMergePolicyProvider = new TieredMergePolicyProvider(logger, this); + this.logByteSizeMergePolicyProvider = new LogByteSizeMergePolicyProvider(logger, this); this.indexSortConfig = new IndexSortConfig(this); searchIdleAfter = scopedSettings.get(INDEX_SEARCH_IDLE_AFTER); defaultPipeline = scopedSettings.get(DEFAULT_PIPELINE); @@ -866,33 +916,59 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti * Now this sortField (IndexSort) is stored in SegmentInfo and we need to maintain backward compatibility for them. */ widenIndexSortType = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).before(V_2_7_0); - - scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, - mergePolicyConfig::setDeletesPctAllowed + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING, + tieredMergePolicyProvider::setNoCFSRatio ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, - mergePolicyConfig::setExpungeDeletesAllowed + TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, + tieredMergePolicyProvider::setDeletesPctAllowed ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, - mergePolicyConfig::setFloorSegmentSetting + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, + tieredMergePolicyProvider::setExpungeDeletesAllowed ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, - mergePolicyConfig::setMaxMergesAtOnce + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, + tieredMergePolicyProvider::setFloorSegmentSetting ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, - mergePolicyConfig::setMaxMergedSegment + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, + tieredMergePolicyProvider::setMaxMergesAtOnce ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, - mergePolicyConfig::setSegmentsPerTier + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, + tieredMergePolicyProvider::setMaxMergedSegment + ); + scopedSettings.addSettingsUpdateConsumer( + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, + tieredMergePolicyProvider::setSegmentsPerTier ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING, + logByteSizeMergePolicyProvider::setLBSMergeFactor + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING, + logByteSizeMergePolicyProvider::setLBSMinMergedMB + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeSegment + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeMBForForcedMerge + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeDocs + ); + 
scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING, + logByteSizeMergePolicyProvider::setLBSNoCFSRatio + ); scopedSettings.addSettingsUpdateConsumer( MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING, @@ -1439,9 +1515,43 @@ public long getGcDeletesInMillis() { /** * Returns the merge policy that should be used for this index. - */ - public MergePolicy getMergePolicy() { - return mergePolicyConfig.getMergePolicy(); + * @param isTimeSeriesIndex true if index contains @timestamp field + */ + public MergePolicy getMergePolicy(boolean isTimeSeriesIndex) { + String indexScopedPolicy = scopedSettings.get(INDEX_MERGE_POLICY); + MergePolicyProvider mergePolicyProvider = null; + IndexMergePolicy indexMergePolicy = IndexMergePolicy.fromString(indexScopedPolicy); + switch (indexMergePolicy) { + case TIERED: + mergePolicyProvider = tieredMergePolicyProvider; + break; + case LOG_BYTE_SIZE: + mergePolicyProvider = logByteSizeMergePolicyProvider; + break; + case DEFAULT_POLICY: + if (isTimeSeriesIndex) { + String nodeScopedTimeSeriesIndexPolicy = TIME_SERIES_INDEX_MERGE_POLICY.get(nodeSettings); + IndexMergePolicy nodeMergePolicy = IndexMergePolicy.fromString(nodeScopedTimeSeriesIndexPolicy); + switch (nodeMergePolicy) { + case TIERED: + case DEFAULT_POLICY: + mergePolicyProvider = tieredMergePolicyProvider; + break; + case LOG_BYTE_SIZE: + mergePolicyProvider = logByteSizeMergePolicyProvider; + break; + } + } else { + mergePolicyProvider = tieredMergePolicyProvider; + } + break; + } + assert mergePolicyProvider != null : "should not happen as validation for invalid merge policy values " + + "are part of setting definition"; + if (logger.isTraceEnabled()) { + logger.trace("Index: " + this.index.getName() + ", Merge policy used: " + mergePolicyProvider); + } + return mergePolicyProvider.getMergePolicy(); } public T getValue(Setting setting) { @@ -1632,7 +1742,7 @@ public boolean isMergeOnFlushEnabled() { } private void setMergeOnFlushPolicy(String policy) { - if (Strings.isEmpty(policy) || MERGE_ON_FLUSH_DEFAULT_POLICY.equalsIgnoreCase(policy)) { + if (Strings.isEmpty(policy) || DEFAULT_POLICY.equalsIgnoreCase(policy)) { mergeOnFlushPolicy = null; } else if (MERGE_ON_FLUSH_MERGE_POLICY.equalsIgnoreCase(policy)) { this.mergeOnFlushPolicy = MergeOnFlushMergePolicy::new; @@ -1643,7 +1753,7 @@ private void setMergeOnFlushPolicy(String policy) { + " has unsupported policy specified: " + policy + ". Please use one of: " - + MERGE_ON_FLUSH_DEFAULT_POLICY + + DEFAULT_POLICY + ", " + MERGE_ON_FLUSH_MERGE_POLICY ); diff --git a/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java b/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java new file mode 100644 index 0000000000000..0b762d781957c --- /dev/null +++ b/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java @@ -0,0 +1,166 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.LogByteSizeMergePolicy; +import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.NoMergePolicy; +import org.opensearch.common.settings.Setting; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; + +import static org.apache.lucene.index.LogMergePolicy.DEFAULT_MAX_MERGE_DOCS; +import static org.apache.lucene.index.LogMergePolicy.DEFAULT_NO_CFS_RATIO; + +/** + *
<p>
+ * The LogByteSizeMergePolicy is an alternative merge policy primarily used here to optimize the merging of segments in scenarios + * involving indexes with timestamps. + * While the TieredMergePolicy is the default choice, the LogByteSizeMergePolicy can be configured + * as the default merge policy for time series indexes via the node-level indices.time_series_index.default_index_merge_policy setting. + *
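+ * A minimal sketch of selecting this policy (illustrative only; it mirrors the Settings usage in the tests in this patch):
+ * <pre>
+ * // Index-scoped: picks the policy for a single index, overriding any node default.
+ * Settings indexScoped = Settings.builder()
+ *     .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size")
+ *     .build();
+ *
+ * // Node-scoped: changes the default only for indexes with a @timestamp field that leave
+ * // index.merge.policy at "default".
+ * Settings nodeScoped = Settings.builder()
+ *     .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), "log_byte_size")
+ *     .build();
+ * </pre>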
<p>
+ * Unlike the TieredMergePolicy, which prioritizes merging segments of equal sizes, the LogByteSizeMergePolicy + * specializes in merging adjacent segments efficiently. + * This characteristic makes it particularly well-suited for range queries on time series data, + * where adjacent segments typically contain documents with similar timestamps. + * When these segments are merged, the resulting segment covers a range of timestamps with reduced overlap compared + * to the adjacent segments. This reduced overlap remains even as segments grow older and larger, + * which can significantly benefit range queries on timestamps. + *
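+ * For instance (an illustrative sketch, not taken from this patch): merging adjacent segments covering
+ * [t0..t5], [t5..t9] and [t9..t14] yields one segment spanning [t0..t14], and the remaining segments keep
+ * contiguous, non-overlapping timestamp ranges. Merging non-adjacent segments, say [t0..t5] with [t9..t14],
+ * instead yields a segment whose range [t0..t14] overlaps the untouched [t5..t9] segment, so a query on
+ * [t6..t8] must now scan both.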
<p>
+ * In contrast, the TieredMergePolicy does not honor this timestamp range optimization. It focuses on merging segments + * of equal sizes and does not consider adjacency. Consequently, as segments grow older and larger, + * the overlap of timestamp ranges among adjacent segments managed by TieredMergePolicy can increase. + * This can lead to inefficiencies in range queries on timestamps, as the number of segments to be scanned + * within a given timestamp range could become high. + * + * @opensearch.internal + */ +public class LogByteSizeMergePolicyProvider implements MergePolicyProvider { + private final LogByteSizeMergePolicy logByteSizeMergePolicy = new LogByteSizeMergePolicy(); + + private final Logger logger; + private final boolean mergesEnabled; + + public static final ByteSizeValue DEFAULT_MIN_MERGE = new ByteSizeValue(2, ByteSizeUnit.MB); + public static final int DEFAULT_MERGE_FACTOR = 10; + + public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); + + public static final ByteSizeValue DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE = new ByteSizeValue(Long.MAX_VALUE); + + public static final Setting INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING = Setting.intSetting( + "index.merge.log_byte_size_policy.merge_factor", + DEFAULT_MERGE_FACTOR, // keeping it same as default max merge at once for tiered merge policy + 2, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.min_merge", + DEFAULT_MIN_MERGE, // keeping it same as default floor segment for tiered merge policy + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MAX_MERGE_SEGMENT_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.max_merge_segment", + DEFAULT_MAX_MERGED_SEGMENT, // keeping default same as tiered merge policy + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.max_merge_segment_forced_merge", + DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MAX_MERGED_DOCS_SETTING = Setting.intSetting( + "index.merge.log_byte_size_policy.max_merged_docs", + DEFAULT_MAX_MERGE_DOCS, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_NO_CFS_RATIO_SETTING = new Setting<>( + "index.merge.log_byte_size_policy.no_cfs_ratio", + Double.toString(DEFAULT_NO_CFS_RATIO), + TieredMergePolicyProvider::parseNoCFSRatio, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + LogByteSizeMergePolicyProvider(Logger logger, IndexSettings indexSettings) { + this.logger = logger; + this.mergesEnabled = indexSettings.getSettings().getAsBoolean(INDEX_MERGE_ENABLED, true); + + // Undocumented settings, works great with defaults + logByteSizeMergePolicy.setMergeFactor(indexSettings.getValue(INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING)); + logByteSizeMergePolicy.setMinMergeMB(indexSettings.getValue(INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING).getMbFrac()); + logByteSizeMergePolicy.setMaxMergeMB(indexSettings.getValue(INDEX_LBS_MAX_MERGE_SEGMENT_SETTING).getMbFrac()); + logByteSizeMergePolicy.setMaxMergeMBForForcedMerge( + 
indexSettings.getValue(INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING).getMbFrac() + ); + logByteSizeMergePolicy.setMaxMergeDocs(indexSettings.getValue(INDEX_LBS_MAX_MERGED_DOCS_SETTING)); + logByteSizeMergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_LBS_NO_CFS_RATIO_SETTING)); + } + + @Override + public MergePolicy getMergePolicy() { + return mergesEnabled ? logByteSizeMergePolicy : NoMergePolicy.INSTANCE; + } + + void setLBSMergeFactor(int mergeFactor) { + logByteSizeMergePolicy.setMergeFactor(mergeFactor); + } + + void setLBSMaxMergeSegment(ByteSizeValue maxMergeSegment) { + logByteSizeMergePolicy.setMaxMergeMB(maxMergeSegment.getMbFrac()); + } + + void setLBSMinMergedMB(ByteSizeValue minMergedSize) { + logByteSizeMergePolicy.setMinMergeMB(minMergedSize.getMbFrac()); + } + + void setLBSMaxMergeMBForForcedMerge(ByteSizeValue maxMergeForcedMerge) { + logByteSizeMergePolicy.setMaxMergeMBForForcedMerge(maxMergeForcedMerge.getMbFrac()); + } + + void setLBSMaxMergeDocs(int maxMergeDocs) { + logByteSizeMergePolicy.setMaxMergeDocs(maxMergeDocs); + } + + void setLBSNoCFSRatio(Double noCFSRatio) { + logByteSizeMergePolicy.setNoCFSRatio(noCFSRatio); + } + + @Override + public String toString() { + return "LogByteSizeMergePolicyProvider{" + + "mergeFactor=" + + logByteSizeMergePolicy.getMergeFactor() + + ", minMergeMB=" + + logByteSizeMergePolicy.getMinMergeMB() + + ", maxMergeMB=" + + logByteSizeMergePolicy.getMaxMergeMB() + + ", maxMergeMBForForcedMerge=" + + logByteSizeMergePolicy.getMaxMergeMBForForcedMerge() + + ", maxMergedDocs=" + + logByteSizeMergePolicy.getMaxMergeDocs() + + ", noCFSRatio=" + + logByteSizeMergePolicy.getNoCFSRatio() + + '}'; + } + +} diff --git a/server/src/main/java/org/opensearch/index/MergePolicyProvider.java b/server/src/main/java/org/opensearch/index/MergePolicyProvider.java new file mode 100644 index 0000000000000..6f734314f758f --- /dev/null +++ b/server/src/main/java/org/opensearch/index/MergePolicyProvider.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.apache.lucene.index.MergePolicy; +import org.opensearch.common.annotation.InternalApi; + +/** + * A provider for obtaining merge policies used by OpenSearch indexes. + * + * @opensearch.internal + */ + +@InternalApi +public interface MergePolicyProvider { + // don't convert to Setting<> and register... we only set this in tests and register via a plugin + String INDEX_MERGE_ENABLED = "index.merge.enabled"; + + /** + * Gets the merge policy to be used for index. + * + * @return The merge policy instance. 
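+ * Note: both implementations in this patch honor the test-only index.merge.enabled flag above and return
+ * {@link org.apache.lucene.index.NoMergePolicy#INSTANCE} when it is set to false.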
+ */ + MergePolicy getMergePolicy(); +} diff --git a/server/src/main/java/org/opensearch/index/MergePolicyConfig.java b/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java similarity index 82% rename from server/src/main/java/org/opensearch/index/MergePolicyConfig.java rename to server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java index fe2af21dfe039..d5d354c6c960a 100644 --- a/server/src/main/java/org/opensearch/index/MergePolicyConfig.java +++ b/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java @@ -33,6 +33,7 @@ package org.opensearch.index; import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.TieredMergePolicy; @@ -47,9 +48,12 @@ * where the index data is stored, and are immutable up to delete markers. * Segments are, periodically, merged into larger segments to keep the * index size at bay and expunge deletes. + * This class customizes and exposes 2 merge policies from lucene - + * {@link LogByteSizeMergePolicy} and {@link TieredMergePolicy}. + * * *
<p>
- * Merges select segments of approximately equal size, subject to an allowed + * The tiered merge policy selects segments of approximately equal size, subject to an allowed * number of segments per tier. The merge policy is able to merge * non-adjacent segments, and separates how many segments are merged at once from how many * segments are allowed per tier. It also does not over-merge (i.e., cascade merges). @@ -125,8 +129,9 @@ * @opensearch.internal */ -public final class MergePolicyConfig { - private final OpenSearchTieredMergePolicy mergePolicy = new OpenSearchTieredMergePolicy(); +public final class TieredMergePolicyProvider implements MergePolicyProvider { + private final OpenSearchTieredMergePolicy tieredMergePolicy = new OpenSearchTieredMergePolicy(); + private final Logger logger; private final boolean mergesEnabled; @@ -137,10 +142,11 @@ public final class MergePolicyConfig { public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d; public static final double DEFAULT_RECLAIM_DELETES_WEIGHT = 2.0d; public static final double DEFAULT_DELETES_PCT_ALLOWED = 20.0d; + public static final Setting<Double> INDEX_COMPOUND_FORMAT_SETTING = new Setting<>( "index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), - MergePolicyConfig::parseNoCFSRatio, + TieredMergePolicyProvider::parseNoCFSRatio, Property.Dynamic, Property.IndexScope ); @@ -194,10 +200,8 @@ public final class MergePolicyConfig { Property.Dynamic, Property.IndexScope ); - // don't convert to Setting<> and register... we only set this in tests and register via a plugin - public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; - MergePolicyConfig(Logger logger, IndexSettings indexSettings) { + TieredMergePolicyProvider(Logger logger, IndexSettings indexSettings) { this.logger = logger; double forceMergeDeletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); // percentage ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING); @@ -216,54 +220,41 @@ public final class MergePolicyConfig { ); } maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier); - mergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING)); - mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed); - mergePolicy.setFloorSegmentMB(floorSegment.getMbFrac()); - mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); - mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); - mergePolicy.setSegmentsPerTier(segmentsPerTier); - mergePolicy.setDeletesPctAllowed(deletesPctAllowed); - if (logger.isTraceEnabled()) { - logger.trace( - "using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], floor_segment[{}]," - + " max_merge_at_once[{}], max_merged_segment[{}], segments_per_tier[{}]," - + " deletes_pct_allowed[{}]", - forceMergeDeletesPctAllowed, - floorSegment, - maxMergeAtOnce, - maxMergedSegment, - segmentsPerTier, - deletesPctAllowed - ); - } + tieredMergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING)); + tieredMergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed); + tieredMergePolicy.setFloorSegmentMB(floorSegment.getMbFrac()); + tieredMergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); + tieredMergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); + tieredMergePolicy.setSegmentsPerTier(segmentsPerTier); + tieredMergePolicy.setDeletesPctAllowed(deletesPctAllowed); } void setSegmentsPerTier(Double segmentsPerTier) { -
mergePolicy.setSegmentsPerTier(segmentsPerTier); + tieredMergePolicy.setSegmentsPerTier(segmentsPerTier); } void setMaxMergedSegment(ByteSizeValue maxMergedSegment) { - mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); + tieredMergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); } void setMaxMergesAtOnce(Integer maxMergeAtOnce) { - mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); + tieredMergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); } void setFloorSegmentSetting(ByteSizeValue floorSegementSetting) { - mergePolicy.setFloorSegmentMB(floorSegementSetting.getMbFrac()); + tieredMergePolicy.setFloorSegmentMB(floorSegementSetting.getMbFrac()); } void setExpungeDeletesAllowed(Double value) { - mergePolicy.setForceMergeDeletesPctAllowed(value); + tieredMergePolicy.setForceMergeDeletesPctAllowed(value); } void setNoCFSRatio(Double noCFSRatio) { - mergePolicy.setNoCFSRatio(noCFSRatio); + tieredMergePolicy.setNoCFSRatio(noCFSRatio); } void setDeletesPctAllowed(Double deletesPctAllowed) { - mergePolicy.setDeletesPctAllowed(deletesPctAllowed); + tieredMergePolicy.setDeletesPctAllowed(deletesPctAllowed); } private int adjustMaxMergeAtOnceIfNeeded(int maxMergeAtOnce, double segmentsPerTier) { @@ -285,11 +276,11 @@ private int adjustMaxMergeAtOnceIfNeeded(int maxMergeAtOnce, double segmentsPerT return maxMergeAtOnce; } - MergePolicy getMergePolicy() { - return mergesEnabled ? mergePolicy : NoMergePolicy.INSTANCE; + public MergePolicy getMergePolicy() { + return mergesEnabled ? tieredMergePolicy : NoMergePolicy.INSTANCE; } - private static double parseNoCFSRatio(String noCFSRatio) { + public static double parseNoCFSRatio(String noCFSRatio) { noCFSRatio = noCFSRatio.trim(); if (noCFSRatio.equalsIgnoreCase("true")) { return 1.0d; @@ -310,4 +301,23 @@ private static double parseNoCFSRatio(String noCFSRatio) { } } } + + @Override + public String toString() { + return "TieredMergePolicyProvider{" + + "expungeDeletesAllowed=" + + tieredMergePolicy.getForceMergeDeletesPctAllowed() + + ", floorSegment=" + + tieredMergePolicy.getFloorSegmentMB() + + ", maxMergeAtOnce=" + + tieredMergePolicy.getMaxMergeAtOnce() + + ", maxMergedSegment=" + + tieredMergePolicy.getMaxMergedSegmentMB() + + ", segmentsPerTier=" + + tieredMergePolicy.getSegmentsPerTier() + + ", deletesPctAllowed=" + + tieredMergePolicy.getDeletesPctAllowed() + + '}'; + } + } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index d476e8b7c9288..5ce066b156775 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -3772,7 +3772,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro indexSettings, warmer, store, - indexSettings.getMergePolicy(), + indexSettings.getMergePolicy(isTimeSeriesIndex), mapperService != null ? 
mapperService.indexAnalyzer() : null, similarityService.similarity(mapperService), engineConfigFactory.newCodecServiceOrDefault(indexSettings, mapperService, logger, codecService), diff --git a/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java index 67846efab2af8..d35c821b41aa0 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java @@ -34,7 +34,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.settings.Settings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.indices.IndexClosedException; import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalSettingsPlugin; @@ -56,7 +56,7 @@ protected Collection> getPlugins() { public void setupIndex() { Settings settings = Settings.builder() // don't allow any merges so that the num docs is the expected segments - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .build(); createIndex("test", settings); diff --git a/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java b/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java index 387997892ee30..32c4c048d77ba 100644 --- a/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java +++ b/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java @@ -31,6 +31,7 @@ package org.opensearch.index; +import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.NoMergePolicy; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; @@ -49,17 +50,17 @@ public class MergePolicySettingsTests extends OpenSearchTestCase { protected final ShardId shardId = new ShardId("index", "_na_", 1); public void testCompoundFileSettings() throws IOException { - assertThat(new MergePolicyConfig(logger, indexSettings(Settings.EMPTY)).getMergePolicy().getNoCFSRatio(), equalTo(0.1)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(true))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0.5))).getMergePolicy().getNoCFSRatio(), equalTo(0.5)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(1.0))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0.0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, 
indexSettings(Settings.EMPTY)).getMergePolicy().getNoCFSRatio(), equalTo(0.1)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(true))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0.5))).getMergePolicy().getNoCFSRatio(), equalTo(0.5)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(1.0))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0.0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); } private static IndexSettings indexSettings(Settings settings) { @@ -67,33 +68,197 @@ private static IndexSettings indexSettings(Settings settings) { } public void testNoMerges() { - MergePolicyConfig mp = new MergePolicyConfig( + TieredMergePolicyProvider tmp = new TieredMergePolicyProvider( logger, - indexSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build()) + indexSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false).build()) ); - assertTrue(mp.getMergePolicy() instanceof NoMergePolicy); + LogByteSizeMergePolicyProvider lbsmp = new LogByteSizeMergePolicyProvider( + logger, + indexSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false).build()) + ); + assertTrue(tmp.getMergePolicy() instanceof NoMergePolicy); + assertTrue(lbsmp.getMergePolicy() instanceof NoMergePolicy); } public void testUpdateSettings() throws IOException { - IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); - assertThat(indexSettings.getMergePolicy().getNoCFSRatio(), equalTo(0.1)); + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + IndexSettings indexSettings = indexSettings(settings); + assertThat(indexSettings.getMergePolicy(false).getNoCFSRatio(), equalTo(0.1)); indexSettings = indexSettings(build(0.9)); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.9)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.9)); indexSettings.updateIndexMetadata(newIndexMeta("index", build(0.1))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.1)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.1)); indexSettings.updateIndexMetadata(newIndexMeta("index", build(0.0))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.0)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.0)); indexSettings.updateIndexMetadata(newIndexMeta("index", build("true"))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(1.0)); + 
assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(1.0)); indexSettings.updateIndexMetadata(newIndexMeta("index", build("false"))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.0)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.0)); + } + + public void testDefaultMergePolicy() throws IOException { + IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + } + + public void testMergePolicyPrecedence() throws IOException { + // 1. INDEX_MERGE_POLICY is not set + // assert defaults + IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 1.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as log_byte_size + // assert index policy is tiered whereas time series index policy is log_byte_size + Settings nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build(); + indexSettings = new IndexSettings(newIndexMeta("test", Settings.EMPTY), nodeSettings); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + // 1.2 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as tiered + // assert both index and time series index policy is tiered + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + indexSettings = new IndexSettings(newIndexMeta("test", Settings.EMPTY), nodeSettings); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 2. INDEX_MERGE_POLICY set as tiered + // assert both index and time-series-index merge policy is set as tiered + indexSettings = indexSettings( + Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()).build() + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 2.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as log_byte_size + // assert both index and time-series-index merge policy is set as tiered + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build(); + indexSettings = new IndexSettings( + newIndexMeta( + "test", + Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()).build() + ), + nodeSettings + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 3. 
INDEX_MERGE_POLICY set as log_byte_size + // assert both index and time-series-index merge policy is set as log_byte_size + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof LogByteSizeMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + // 3.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as tiered + // assert both index and time-series-index merge policy is set as log_byte_size + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + indexSettings = new IndexSettings( + newIndexMeta( + "test", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ), + nodeSettings + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof LogByteSizeMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + } + + public void testInvalidMergePolicy() throws IOException { + + final Settings invalidSettings = Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "invalid").build(); + IllegalArgumentException exc1 = expectThrows( + IllegalArgumentException.class, + () -> IndexSettings.INDEX_MERGE_POLICY.get(invalidSettings) + ); + assertThat(exc1.getMessage(), containsString(" has unsupported policy specified: ")); + IllegalArgumentException exc2 = expectThrows( + IllegalArgumentException.class, + () -> indexSettings(invalidSettings).getMergePolicy(false) + ); + assertThat(exc2.getMessage(), containsString(" has unsupported policy specified: ")); + + final Settings invalidSettings2 = Settings.builder().put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), "invalid").build(); + IllegalArgumentException exc3 = expectThrows( + IllegalArgumentException.class, + () -> IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.get(invalidSettings2) + ); + assertThat(exc3.getMessage(), containsString(" has unsupported policy specified: ")); + + IllegalArgumentException exc4 = expectThrows( + IllegalArgumentException.class, + () -> new IndexSettings(newIndexMeta("test", Settings.EMPTY), invalidSettings2).getMergePolicy(true) + ); + assertThat(exc4.getMessage(), containsString(" has unsupported policy specified: ")); + } + + public void testUpdateSettingsForLogByteSizeMergePolicy() throws IOException { + IndexSettings indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + assertThat(indexSettings.getMergePolicy(true).getNoCFSRatio(), equalTo(0.1)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.9) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.9)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.1) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), 
equalTo(0.1)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.0) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.0)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), "true") + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(1.0)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), "false") + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.0)); } public void testTieredMergePolicySettingsUpdate() throws IOException { IndexSettings indexSettings = indexSettings(Settings.EMPTY); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d ); @@ -102,21 +267,21 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMbFrac(), 0 ); indexSettings.updateIndexMetadata( @@ -124,41 +289,41 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB) + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB) ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - 
MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE ); indexSettings.updateIndexMetadata( newIndexMeta( "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1 + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE - 1 ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1 + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE - 1 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), 0.0001 ); indexSettings.updateIndexMetadata( @@ -166,21 +331,21 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1) + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1) ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER, 0 ); indexSettings.updateIndexMetadata( @@ -188,37 +353,37 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1 + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER + 1 ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER + 1, 0 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_DELETES_PCT_ALLOWED, 0 ); indexSettings.updateIndexMetadata( newIndexMeta( "index", - 
Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build() + Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build() ) ); - assertEquals(((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), 22, 0); + assertEquals(((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), 22, 0); IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, () -> indexSettings.updateIndexMetadata( newIndexMeta( "index", - Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build() + Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build() ) ) ); @@ -226,50 +391,162 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { assertThat(cause.getMessage(), containsString("must be <= 50.0")); indexSettings.updateIndexMetadata(newIndexMeta("index", EMPTY_SETTINGS)); // see if defaults are restored assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER, 0 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_DELETES_PCT_ALLOWED, 0 ); } + public void testLogByteSizeMergePolicySettingsUpdate() throws IOException { + + IndexSettings indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMergeFactor(), + 
LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey(), + LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + 1 + ) + .build() + ) + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMergeFactor(), + LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + 1 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING.getKey(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE.getMb() + 1, ByteSizeUnit.MB) + ) + .build() + ) + ); + + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMinMergeMB(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING.getKey(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMb() + 100, ByteSizeUnit.MB) + ) + .build() + ) + ); + + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMb() + 100, ByteSizeUnit.MB).getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING.getKey(), + new ByteSizeValue( + LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE.getMb() - 100, + ByteSizeUnit.MB + ) + ) + .build() + ) + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeMBForForcedMerge(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE.getMb() - 100, ByteSizeUnit.MB) + .getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING.getKey(), 10000000) + .build() + ) + ); + assertEquals(((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeDocs(), 10000000); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.1) + .build() + ) + ); + assertEquals(indexSettings.getMergePolicy(true).getNoCFSRatio(), 0.1, 0.0); + } + public Settings build(String value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(double value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return 
Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(int value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(boolean value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } } diff --git a/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java b/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java index 2443ee1ab40be..baaf584702f78 100644 --- a/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java @@ -92,8 +92,8 @@ public void testUpdateAutoThrottleSettings() throws Exception { .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "2") .put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), "true"); @@ -123,8 +123,8 @@ public void testUpdateMergeMaxThreadCount() throws Exception { .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "10000") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10000"); IndexSettings settings = new IndexSettings(newIndexMeta("index", builder.build()), Settings.EMPTY); diff --git a/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java index 9c8f9896850c6..c88c86d51be08 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -58,7 +58,7 @@ import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.PersistedClusterStateService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.EngineConfigFactory; import org.opensearch.index.engine.EngineCreationFailureException; import org.opensearch.index.engine.InternalEngineFactory; @@ -134,7 +134,7 @@ public void setup() 
throws IOException { final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetadata.SETTING_INDEX_UUID, shardId.getIndex().getUUID()) .build(); diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java index 11d916616578d..ad90255a3cc3f 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java @@ -54,7 +54,7 @@ import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.VersionType; import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.Engine; @@ -168,7 +168,7 @@ public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception { .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10) // If soft-deletes is enabled, delete#1 will be reclaimed because its segment (segment_1) is fully deleted // index#0 will be retained if merge is disabled; otherwise it will be reclaimed because gcp=3 and retained_ops=0 - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .build(); try (ReplicationGroup shards = createGroup(1, settings)) { shards.startAll(); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 6e064f943ca07..0b80c6e577f95 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -130,9 +130,9 @@ import org.opensearch.http.HttpInfo; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; import org.opensearch.index.MergeSchedulerConfig; import org.opensearch.index.MockEngineFactoryPlugin; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.codec.CodecService; import org.opensearch.index.engine.Segment; import org.opensearch.index.mapper.CompletionFieldMapper; @@ -500,7 +500,7 @@ protected Settings.Builder setRandomIndexSettings(Random random, Settings.Builde private static Settings.Builder setRandomIndexMergeSettings(Random random, Settings.Builder builder) { if (random.nextBoolean()) { builder.put( - MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), (random.nextBoolean() ? random.nextDouble() : random.nextBoolean()).toString() ); } From d3bf230fe820f20121f3be35ca42b97cf804fea1 Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Mon, 2 Oct 2023 13:47:48 -0700 Subject: [PATCH 02/12] Simplify initialization of Settings (#10194) There was a weird circular class-loading dependency between Settings and Settings.Builder that could result in ClassLoader deadlock if multiple threads trigger class-loading of Settings at the same time. 
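To see the hazard concretely, here is a minimal, self-contained sketch of the cycle. The class and field names mirror the real ones, but this is a hypothetical reproduction for illustration, not the actual OpenSearch code, and whether a given run deadlocks depends on thread timing:

    // Two static initializers that each force initialization of the other class.
    // Per JLS 12.4.2, each thread acquires one class-initialization lock and then
    // blocks waiting for the other, so the two threads below can deadlock.
    public class InitDeadlockDemo {
        static class Settings {
            // Settings.<clinit> constructs a Builder, forcing Builder.<clinit>
            static final Settings EMPTY = new Builder().build();
        }

        static class Builder {
            // Builder.<clinit> constructs a Settings, forcing Settings.<clinit>
            static final Settings EMPTY_SETTINGS = new Settings();

            Settings build() {
                return new Settings();
            }
        }

        public static void main(String[] args) {
            new Thread(() -> { Object a = Settings.EMPTY; }).start();         // enters Settings.<clinit> first
            new Thread(() -> { Object b = Builder.EMPTY_SETTINGS; }).start(); // enters Builder.<clinit> first
        }
    }

The change below breaks the cycle: Settings.EMPTY is constructed directly instead of going through a Builder, and Builder.EMPTY_SETTINGS simply aliases Settings.EMPTY, so neither static initializer depends on the other class.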
Signed-off-by: Michael Froh --- CHANGELOG.md | 1 + .../main/java/org/opensearch/common/settings/Settings.java | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7519e242c0acf..46f34abf72641 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -129,6 +129,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045)) - Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) - Fix remove ingest processor handing ignore_missing parameter not correctly ([10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) +- Fix circular dependency in Settings initialization ([10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) ### Security diff --git a/server/src/main/java/org/opensearch/common/settings/Settings.java b/server/src/main/java/org/opensearch/common/settings/Settings.java index 91e39e38f0379..d202feb0786bf 100644 --- a/server/src/main/java/org/opensearch/common/settings/Settings.java +++ b/server/src/main/java/org/opensearch/common/settings/Settings.java @@ -101,7 +101,7 @@ @PublicApi(since = "1.0.0") public final class Settings implements ToXContentFragment { - public static final Settings EMPTY = new Builder().build(); + public static final Settings EMPTY = new Settings(Collections.emptyMap(), null); /** The raw settings from the full key to raw string value. */ private final Map settings; @@ -757,7 +757,7 @@ public Set keySet() { @PublicApi(since = "1.0.0") public static class Builder { - public static final Settings EMPTY_SETTINGS = new Builder().build(); + public static final Settings EMPTY_SETTINGS = Settings.EMPTY; // we use a sorted map for consistent serialization when using getAsMap() private final Map map = new TreeMap<>(); From 9d0db5ef413ebc9f2eb7d8721ef74be273908094 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Romain=20Tarti=C3=A8re?= Date: Mon, 2 Oct 2023 10:58:24 -1000 Subject: [PATCH 03/12] Remove spurious SGID (#9447) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Setting the SGID bit on directories may be useful to some users, but setting it by default for all users does not really make sense, and when packaging OpenSearch we need to remove this customization when building packages. This was added to Elasticsearch to make it possible to manage the keystore as root while the service runs as an unprivileged user. Without the SGID trick, the generated keystore was owned by root and Elasticsearch could not access it. It is preferable to manage the keystore with non-root privileges, and in that case this hack is not required. Stick to the default permissions and remove this customization.
Signed-off-by: Romain Tartière --- CHANGELOG.md | 1 + distribution/packages/build.gradle | 8 ++++---- distribution/packages/src/deb/lintian/opensearch | 8 ++++---- .../test/java/org/opensearch/packaging/util/Packages.java | 4 ++-- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 46f34abf72641..c8e4cc57593a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -123,6 +123,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Deprecated ### Removed +- Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447)) ### Fixed - Fix ignore_missing parameter has no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725)) diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 7914fcc172ef4..262ad6c802bbb 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -213,7 +213,7 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { configurationFile '/etc/opensearch/jvm.options' configurationFile '/etc/opensearch/log4j2.properties' from("${packagingFiles}") { - dirMode 02750 + dirMode 0750 into('/etc') permissionGroup 'opensearch' includeEmptyDirs true @@ -223,7 +223,7 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { } from("${packagingFiles}/etc/opensearch") { into('/etc/opensearch') - dirMode 02750 + dirMode 0750 fileMode 0660 permissionGroup 'opensearch' includeEmptyDirs true @@ -281,8 +281,8 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { dirMode mode } } - copyEmptyDir('/var/log/opensearch', 'opensearch', 'opensearch', 02750) - copyEmptyDir('/var/lib/opensearch', 'opensearch', 'opensearch', 02750) + copyEmptyDir('/var/log/opensearch', 'opensearch', 'opensearch', 0750) + copyEmptyDir('/var/lib/opensearch', 'opensearch', 'opensearch', 0750) copyEmptyDir('/usr/share/opensearch/plugins', 'root', 'root', 0755) into '/usr/share/opensearch' diff --git a/distribution/packages/src/deb/lintian/opensearch b/distribution/packages/src/deb/lintian/opensearch index 854b23131ecbc..e6db8e8c6b322 100644 --- a/distribution/packages/src/deb/lintian/opensearch +++ b/distribution/packages/src/deb/lintian/opensearch @@ -15,11 +15,11 @@ missing-dep-on-jarwrapper # we prefer to not make our config and log files world readable non-standard-file-perm etc/default/opensearch 0660 != 0644 -non-standard-dir-perm etc/opensearch/ 2750 != 0755 -non-standard-dir-perm etc/opensearch/jvm.options.d/ 2750 != 0755 +non-standard-dir-perm etc/opensearch/ 0750 != 0755 +non-standard-dir-perm etc/opensearch/jvm.options.d/ 0750 != 0755 non-standard-file-perm etc/opensearch/* -non-standard-dir-perm var/lib/opensearch/ 2750 != 0755 -non-standard-dir-perm var/log/opensearch/ 2750 != 0755 +non-standard-dir-perm var/lib/opensearch/ 0750 != 0755 +non-standard-dir-perm var/log/opensearch/ 0750 != 0755 executable-is-not-world-readable etc/init.d/opensearch 0750 non-standard-file-permissions-for-etc-init.d-script etc/init.d/opensearch 0750 != 0755 diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java b/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java index b80ae422bda9a..e9ebf28042b46 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java @@ -194,11 +194,11 @@ private 
static void verifyInstallation(Installation opensearch, Distribution dis // we shell out here because java's posix file permission view doesn't support special modes assertThat(opensearch.config, file(Directory, "root", "opensearch", p750)); - assertThat(sh.run("find \"" + opensearch.config + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750")); + assertThat(sh.run("find \"" + opensearch.config + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("750")); final Path jvmOptionsDirectory = opensearch.config.resolve("jvm.options.d"); assertThat(jvmOptionsDirectory, file(Directory, "root", "opensearch", p750)); - assertThat(sh.run("find \"" + jvmOptionsDirectory + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750")); + assertThat(sh.run("find \"" + jvmOptionsDirectory + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("750")); Stream.of("opensearch.keystore", "opensearch.yml", "jvm.options", "log4j2.properties") .forEach(configFile -> assertThat(opensearch.config(configFile), file(File, "root", "opensearch", p660))); From 1d66af346f7863bbca44a3102a1af2fed5fd3804 Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) Doubrovkine" Date: Mon, 2 Oct 2023 17:51:47 -0400 Subject: [PATCH 04/12] Fix: register multiple extensions. (#10256) * Fix: register multiple extensions. Signed-off-by: dblock * Updated CHANGELOG. Signed-off-by: dblock * Added tests. Signed-off-by: dblock --------- Signed-off-by: dblock --- CHANGELOG.md | 1 + .../extensions/ExtensionsManager.java | 21 ++- .../rest/RestActionsRequestHandler.java | 3 + .../rest/RestInitializeExtensionAction.java | 3 +- .../rest/RestSendToExtensionAction.java | 2 +- .../extensions/ExtensionsManagerTests.java | 145 +++++++++++++++++- .../RestInitializeExtensionActionTests.java | 19 +-- .../rest/RestSendToExtensionActionTests.java | 6 +- 8 files changed, 176 insertions(+), 24 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c8e4cc57593a4..fb37533d4c834 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -131,6 +131,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) - Fix remove ingest processor handing ignore_missing parameter not correctly ([10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) - Fix circular dependency in Settings initialization ([10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) +- Fix registration and initialization of multiple extensions ([10256](https://github.com/opensearch-project/OpenSearch/pull/10256)) ### Security diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java index 9f9ba548143c6..b531abcb845d7 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java @@ -300,7 +300,7 @@ private void registerRequestHandler(DynamicActionRegistry dynamicActionRegistry) * Loads a single extension * @param extension The extension to be loaded */ - public void loadExtension(Extension extension) throws IOException { + public DiscoveryExtensionNode loadExtension(Extension extension) throws IOException { validateExtension(extension); DiscoveryExtensionNode discoveryExtensionNode = new DiscoveryExtensionNode( extension.getName(), @@ -314,6 +314,12 @@ public void loadExtension(Extension
extension) throws IOException { extensionIdMap.put(extension.getUniqueId(), discoveryExtensionNode); extensionSettingsMap.put(extension.getUniqueId(), extension); logger.info("Loaded extension with uniqueId " + extension.getUniqueId() + ": " + extension); + return discoveryExtensionNode; + } + + public void initializeExtension(Extension extension) throws IOException { + DiscoveryExtensionNode node = loadExtension(extension); + initializeExtensionNode(node); } private void validateField(String fieldName, String value) throws IOException { @@ -340,11 +346,11 @@ private void validateExtension(Extension extension) throws IOException { */ public void initialize() { for (DiscoveryExtensionNode extension : extensionIdMap.values()) { - initializeExtension(extension); + initializeExtensionNode(extension); } } - private void initializeExtension(DiscoveryExtensionNode extension) { + public void initializeExtensionNode(DiscoveryExtensionNode extensionNode) { final CompletableFuture inProgressFuture = new CompletableFuture<>(); final TransportResponseHandler initializeExtensionResponseHandler = new TransportResponseHandler< @@ -384,7 +390,8 @@ public String executor() { transportService.getThreadPool().generic().execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { - extensionIdMap.remove(extension.getId()); + logger.warn("Error registering extension: " + extensionNode.getId(), e); + extensionIdMap.remove(extensionNode.getId()); if (e.getCause() instanceof ConnectTransportException) { logger.info("No response from extension to request.", e); throw (ConnectTransportException) e.getCause(); @@ -399,11 +406,11 @@ public void onFailure(Exception e) { @Override protected void doRun() throws Exception { - transportService.connectToExtensionNode(extension); + transportService.connectToExtensionNode(extensionNode); transportService.sendRequest( - extension, + extensionNode, REQUEST_EXTENSION_ACTION_NAME, - new InitializeExtensionRequest(transportService.getLocalNode(), extension, issueServiceAccount(extension)), + new InitializeExtensionRequest(transportService.getLocalNode(), extensionNode, issueServiceAccount(extensionNode)), initializeExtensionResponseHandler ); } diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java b/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java index 97851cbd394a0..383796f0c3b44 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java @@ -62,6 +62,9 @@ public TransportResponse handleRegisterRestActionsRequest( DynamicActionRegistry dynamicActionRegistry ) throws Exception { DiscoveryExtensionNode discoveryExtensionNode = extensionIdMap.get(restActionsRequest.getUniqueId()); + if (discoveryExtensionNode == null) { + throw new IllegalStateException("Missing extension node for " + restActionsRequest.getUniqueId()); + } RestHandler handler = new RestSendToExtensionAction( restActionsRequest, discoveryExtensionNode, diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java index 4b622b841a040..fc7c21a6eccd6 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java @@ -159,8 +159,7 @@ public 
RestChannelConsumer prepareRequest(RestRequest request, NodeClient client extAdditionalSettings ); try { - extensionsManager.loadExtension(extension); - extensionsManager.initialize(); + extensionsManager.initializeExtension(extension); } catch (CompletionException e) { Throwable cause = e.getCause(); if (cause instanceof TimeoutException) { diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java index 33f44a913dd8a..41783b89ccc69 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java @@ -150,7 +150,7 @@ public RestSendToExtensionAction( @Override public String getName() { - return SEND_TO_EXTENSION_ACTION; + return this.discoveryExtensionNode.getId() + ":" + SEND_TO_EXTENSION_ACTION; } @Override diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java index f243a924f4e63..c61afdd5c5261 100644 --- a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java +++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java @@ -36,6 +36,7 @@ import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.transport.TransportResponse; +import org.opensearch.discovery.InitializeExtensionRequest; import org.opensearch.env.Environment; import org.opensearch.env.EnvironmentSettingsResponse; import org.opensearch.extensions.ExtensionsSettings.Extension; @@ -77,6 +78,7 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -409,19 +411,94 @@ public void testInitialize() throws Exception { ) ); - // Test needs to be changed to mock the connection between the local node and an extension. Assert statment is commented out for - // now. + // Test needs to be changed to mock the connection between the local node and an extension. 
// Link to issue: https://github.com/opensearch-project/OpenSearch/issues/4045 // mockLogAppender.assertAllExpectationsMatched(); } } + public void testInitializeExtension() throws Exception { + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); + + TransportService mockTransportService = spy( + new TransportService( + Settings.EMPTY, + mock(Transport.class), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, + null, + Collections.emptySet(), + NoopTracer.INSTANCE + ) + ); + + doNothing().when(mockTransportService).connectToExtensionNode(any(DiscoveryExtensionNode.class)); + + doNothing().when(mockTransportService) + .sendRequest(any(DiscoveryExtensionNode.class), anyString(), any(InitializeExtensionRequest.class), any()); + + extensionsManager.initializeServicesAndRestHandler( + actionModule, + settingsModule, + mockTransportService, + clusterService, + settings, + client, + identityService + ); + + Extension firstExtension = new Extension( + "firstExtension", + "uniqueid1", + "127.0.0.0", + "9301", + "0.0.7", + "2.0.0", + "2.0.0", + List.of(), + null + ); + + extensionsManager.initializeExtension(firstExtension); + + Extension secondExtension = new Extension( + "secondExtension", + "uniqueid2", + "127.0.0.0", + "9301", + "0.0.7", + "2.0.0", + "2.0.0", + List.of(), + null + ); + + extensionsManager.initializeExtension(secondExtension); + + ThreadPool.terminate(threadPool, 3, TimeUnit.SECONDS); + + verify(mockTransportService, times(2)).connectToExtensionNode(any(DiscoveryExtensionNode.class)); + + verify(mockTransportService, times(2)).sendRequest( + any(DiscoveryExtensionNode.class), + anyString(), + any(InitializeExtensionRequest.class), + any() + ); + } + public void testHandleRegisterRestActionsRequest() throws Exception { ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); + List actionsList = List.of("GET /foo foo", "PUT /bar bar", "POST /baz baz"); List deprecatedActionsList = List.of("GET /deprecated/foo foo_deprecated", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -431,6 +508,58 @@ public void testHandleRegisterRestActionsRequest() throws Exception { assertTrue(((AcknowledgedResponse) response).getStatus()); } + public void testHandleRegisterRestActionsRequestRequiresDiscoveryNode() throws Exception { + + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); + initialize(extensionsManager); + + RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest("uniqueId1", List.of(), List.of()); + + expectThrows( + IllegalStateException.class, + () -> extensionsManager.getRestActionsRequestHandler() + .handleRegisterRestActionsRequest(registerActionsRequest, actionModule.getDynamicActionRegistry()) + ); + } + + public void testHandleRegisterRestActionsRequestMultiple() throws Exception { + + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); + initialize(extensionsManager); + + List actionsList = List.of("GET /foo foo", "PUT /bar bar", "POST /baz baz"); + List deprecatedActionsList = List.of("GET /deprecated/foo foo_deprecated", "It's deprecated!"); + for (int i = 0; i < 2; 
i++) { + String uniqueIdStr = "uniqueid-%d" + i; + + Set> additionalSettings = extAwarePlugin.getExtensionSettings().stream().collect(Collectors.toSet()); + ExtensionScopedSettings extensionScopedSettings = new ExtensionScopedSettings(additionalSettings); + Extension firstExtension = new Extension( + "Extension %s" + i, + uniqueIdStr, + "127.0.0.0", + "9300", + "0.0.7", + "3.0.0", + "3.0.0", + List.of(), + extensionScopedSettings + ); + + extensionsManager.loadExtension(firstExtension); + + RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest( + uniqueIdStr, + actionsList, + deprecatedActionsList + ); + TransportResponse response = extensionsManager.getRestActionsRequestHandler() + .handleRegisterRestActionsRequest(registerActionsRequest, actionModule.getDynamicActionRegistry()); + assertEquals(AcknowledgedResponse.class, response.getClass()); + assertTrue(((AcknowledgedResponse) response).getStatus()); + } + } + public void testHandleRegisterSettingsRequest() throws Exception { ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); @@ -452,6 +581,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidMethod() throws Excep initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List actionsList = List.of("FOO /foo", "PUT /bar", "POST /baz"); List deprecatedActionsList = List.of("GET /deprecated/foo", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -467,6 +599,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedMethod() th initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List actionsList = List.of("GET /foo", "PUT /bar", "POST /baz"); List deprecatedActionsList = List.of("FOO /deprecated/foo", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -481,6 +616,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidUri() throws Exceptio ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List actionsList = List.of("GET", "PUT /bar", "POST /baz"); List deprecatedActionsList = List.of("GET /deprecated/foo", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -495,6 +633,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedUri() throw ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List actionsList = List.of("GET /foo", "PUT /bar", "POST /baz"); List deprecatedActionsList = List.of("GET", "It's deprecated!"); 
RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java index cdddf8e9be1be..e237214ab88f5 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java @@ -19,8 +19,9 @@ import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.extensions.DiscoveryExtensionNode; import org.opensearch.extensions.ExtensionsManager; -import org.opensearch.extensions.ExtensionsSettings; +import org.opensearch.extensions.ExtensionsSettings.Extension; import org.opensearch.identity.IdentityService; import org.opensearch.rest.RestRequest; import org.opensearch.telemetry.tracing.noop.NoopTracer; @@ -160,8 +161,8 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettings() th // optionally, you can stub out some methods: when(spy.getAdditionalSettings()).thenCallRealMethod(); - Mockito.doCallRealMethod().when(spy).loadExtension(any(ExtensionsSettings.Extension.class)); - Mockito.doNothing().when(spy).initialize(); + Mockito.doCallRealMethod().when(spy).loadExtension(any(Extension.class)); + Mockito.doNothing().when(spy).initializeExtensionNode(any(DiscoveryExtensionNode.class)); RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(spy); final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" @@ -177,10 +178,10 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettings() th FakeRestChannel channel = new FakeRestChannel(request, false, 0); restInitializeExtensionAction.handleRequest(request, channel, null); - assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); + assertEquals(RestStatus.ACCEPTED, channel.capturedResponse().status()); assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); - Optional extension = spy.lookupExtensionSettingsById("ad-extension"); + Optional extension = spy.lookupExtensionSettingsById("ad-extension"); assertTrue(extension.isPresent()); assertEquals(true, extension.get().getAdditionalSettings().get(boolSetting)); assertEquals("customSetting", extension.get().getAdditionalSettings().get(stringSetting)); @@ -210,8 +211,8 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettingsUsing // optionally, you can stub out some methods: when(spy.getAdditionalSettings()).thenCallRealMethod(); - Mockito.doCallRealMethod().when(spy).loadExtension(any(ExtensionsSettings.Extension.class)); - Mockito.doNothing().when(spy).initialize(); + Mockito.doCallRealMethod().when(spy).loadExtension(any(Extension.class)); + Mockito.doNothing().when(spy).initializeExtensionNode(any(DiscoveryExtensionNode.class)); RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(spy); final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + 
"\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" @@ -227,10 +228,10 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettingsUsing FakeRestChannel channel = new FakeRestChannel(request, false, 0); restInitializeExtensionAction.handleRequest(request, channel, null); - assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); + assertEquals(RestStatus.ACCEPTED, channel.capturedResponse().status()); assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); - Optional extension = spy.lookupExtensionSettingsById("ad-extension"); + Optional extension = spy.lookupExtensionSettingsById("ad-extension"); assertTrue(extension.isPresent()); assertEquals(false, extension.get().getAdditionalSettings().get(boolSetting)); assertEquals("default", extension.get().getAdditionalSettings().get(stringSetting)); diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java index ee36ea170e270..fe738ff7d85e6 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java @@ -150,7 +150,7 @@ public void testRestSendToExtensionAction() throws Exception { identityService ); - assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); + assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName()); List expected = new ArrayList<>(); String uriPrefix = "/_extensions/_uniqueid1"; expected.add(new Route(Method.GET, uriPrefix + "/foo")); @@ -183,7 +183,7 @@ public void testRestSendToExtensionActionWithNamedRoute() throws Exception { identityService ); - assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); + assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName()); List expected = new ArrayList<>(); String uriPrefix = "/_extensions/_uniqueid1"; NamedRoute nr1 = new NamedRoute.Builder().method(Method.GET).path(uriPrefix + "/foo").uniqueName("foo").build(); @@ -229,7 +229,7 @@ public void testRestSendToExtensionActionWithNamedRouteAndLegacyActionName() thr identityService ); - assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); + assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName()); List expected = new ArrayList<>(); String uriPrefix = "/_extensions/_uniqueid1"; NamedRoute nr1 = new NamedRoute.Builder().method(Method.GET) From beb25b1957321234aa3642977b0f7144318c8d8a Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 2 Oct 2023 18:21:58 -0400 Subject: [PATCH 05/12] Bump netty from 4.1.97.Final to 4.1.99.Final (#10306) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- .../licenses/netty-buffer-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.99.Final.jar.sha1 | 1 + .../transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1 | 1 - .../transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.99.Final.jar.sha1 | 1 + 
.../licenses/netty-handler-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.99.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.97.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-socks-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-socks-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.97.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.99.Final.jar.sha1 | 1 + .../repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 | 1 - .../repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.99.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.99.Final.jar.sha1 | 1 + .../netty-transport-classes-epoll-4.1.97.Final.jar.sha1 | 1 - .../netty-transport-classes-epoll-4.1.99.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.97.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.99.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.99.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.97.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.99.Final.jar.sha1 | 1 + 68 files changed, 35 
insertions(+), 34 deletions(-) delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 delete mode 100644 
plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index fb37533d4c834..9b4e6408f3491 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -110,6 +110,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` - Bump Lucene from 9.7.0 to 9.8.0 ([#10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) - Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) +- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) ### Changed - Add instrumentation in rest and network layer. 
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 243a1b2c6f57e..e54a5a1089a93 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -28,7 +28,7 @@ jakarta_annotation = 1.3.5
 
 # when updating the JNA version, also update the version in buildSrc/build.gradle
 jna = 5.13.0
-netty = 4.1.97.Final
+netty = 4.1.99.Final
 joda = 2.12.2
 
 # client dependencies
diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1
deleted file mode 100644
index 8430355365996..0000000000000
--- a/modules/transport-netty4/licenses/netty-buffer-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1
new file mode 100644
index 0000000000000..5b393be40e945
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-buffer-4.1.99.Final.jar.sha1
@@ -0,0 +1 @@
+9f02dcb9b15a647a56af210dffdc294a57922fb0
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1
deleted file mode 100644
index 7a36dc1f2724f..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-384ba4d75670befbedb45c4d3b497a93639c206d
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1
new file mode 100644
index 0000000000000..45ea27d29a183
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-4.1.99.Final.jar.sha1
@@ -0,0 +1 @@
+9984cbd6e5d55c768f198e975d8aaf7fd42a4602
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1
deleted file mode 100644
index 37b78a32f741f..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-http-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-af78acec783ffd77c63d8aeecc21041fd39ac54f
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1
new file mode 100644
index 0000000000000..6bb7fcd68b272
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.99.Final.jar.sha1
@@ -0,0 +1 @@
+7142095066eaebd5f29b88c41af7b383b6a953f6
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1
deleted file mode 100644
index cbf685a6d79d3..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-893888d09a7bef0d0ba973d7471943e765d0fd08
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1
new file mode 100644
index 0000000000000..f9bdefc6dd965
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.99.Final.jar.sha1
@@ -0,0 +1 @@
+c5a3481c4bb9732a3a94fb63cf916141a1a14669 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index 1bdfec3aae6ba..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7cceacaf11df8dc63f23d0fb58e9d4640fc88404 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..d53adfa649f5f --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +278f6dfa49d6bd75c40ae1470eb165716f87dce0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8b7b50a6fc9c6..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abb86c6906bf512bf2b797a41cd7d2e8d3cd7c36 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..258f7c957dda0 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +742693761d7ea4c038bccfda96bb38194720b80d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1 deleted file mode 100644 index 032959e98d009..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cec8348108dc76c47cf87c669d514be52c922144 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..b8bc0a4370f58 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +080e45397d9d5b134477de3ffd0f94283b908621 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1 deleted file mode 100644 index 107863c1b3c9d..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f37380d23c9bb079bc702910833b2fd532c9abd0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..247975e0a64c7 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9ca2e3ae19a6713b749df154622115f480b6716c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index f736d37d071b7..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 
+0,0 @@ -d469d84265ab70095b01b40886cabdd433b6e664 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6b7b66ea768e3 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +cb0fc6c31c387404212949c57950b5d72ce908b9 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1 deleted file mode 100644 index f592ac8312a5d..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d266d079ef33cf93a16b382d64dd15d562df1159 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6c1112ed49775 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +21c76a42a468faafac6c84f8aca775073fc8e345 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 deleted file mode 100644 index cbf685a6d79d3..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -893888d09a7bef0d0ba973d7471943e765d0fd08 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..f9bdefc6dd965 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +c5a3481c4bb9732a3a94fb63cf916141a1a14669 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1 deleted file mode 100644 index d06147a0ba646..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30e8fa29a349db5a933225d61891b8802836bb79 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..717703c36e1ab --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +259bf1c5178c3e23bb89a2fab59b6d22846e3fa6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 deleted file mode 100644 index 67c3a763d26fa..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a99ecef0e1d86a92e40a7c89805c236d9cd7493e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..3f69ae54c5d4a --- /dev/null 
+++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +8c8a89ea89b06e120c57bdb3db14b9a47ca30bb3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 deleted file mode 100644 index 60fd706436ae7..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2c50f835777ecd4535e15b552b5d9ccb26a2504f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..adef44a4e7da7 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +804d8b752847923d3bb81f24de604597047c9b2e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index f736d37d071b7..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d469d84265ab70095b01b40886cabdd433b6e664 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6b7b66ea768e3 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +cb0fc6c31c387404212949c57950b5d72ce908b9 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 deleted file mode 100644 index c6fa4cc175222..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -afec3c414a0ab7264a66a7572e9e9d3a19a3e0e5 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..0756635018837 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +a45aa70bc50d0500da5cdcd595cc838d87ada987 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8430355365996..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..5b393be40e945 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9f02dcb9b15a647a56af210dffdc294a57922fb0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 deleted file mode 100644 index 
7a36dc1f2724f..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -384ba4d75670befbedb45c4d3b497a93639c206d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..45ea27d29a183 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9984cbd6e5d55c768f198e975d8aaf7fd42a4602 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1 deleted file mode 100644 index 37b78a32f741f..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af78acec783ffd77c63d8aeecc21041fd39ac54f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6bb7fcd68b272 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +7142095066eaebd5f29b88c41af7b383b6a953f6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 deleted file mode 100644 index cbf685a6d79d3..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -893888d09a7bef0d0ba973d7471943e765d0fd08 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..f9bdefc6dd965 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +c5a3481c4bb9732a3a94fb63cf916141a1a14669 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index 1bdfec3aae6ba..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7cceacaf11df8dc63f23d0fb58e9d4640fc88404 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..d53adfa649f5f --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +278f6dfa49d6bd75c40ae1470eb165716f87dce0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8b7b50a6fc9c6..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abb86c6906bf512bf2b797a41cd7d2e8d3cd7c36 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..258f7c957dda0 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.99.Final.jar.sha1 @@ -0,0 +1 
@@ +742693761d7ea4c038bccfda96bb38194720b80d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 deleted file mode 100644 index 032959e98d009..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cec8348108dc76c47cf87c669d514be52c922144 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..b8bc0a4370f58 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +080e45397d9d5b134477de3ffd0f94283b908621 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1 deleted file mode 100644 index 107863c1b3c9d..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f37380d23c9bb079bc702910833b2fd532c9abd0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..247975e0a64c7 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9ca2e3ae19a6713b749df154622115f480b6716c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8e40c8826d76d..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -795da37ded759e862457a82d9d92c4d39ce8ecee \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..75b64ad4197d8 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +961bd5b8d97ea6a07168176462f398089a24b5c8 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index f736d37d071b7..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d469d84265ab70095b01b40886cabdd433b6e664 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6b7b66ea768e3 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +cb0fc6c31c387404212949c57950b5d72ce908b9 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8430355365996..0000000000000 --- 
a/plugins/transport-nio/licenses/netty-buffer-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8f3d8644afa5e6e1a40a3a6aeb9d9aa970ecb4f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..5b393be40e945 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9f02dcb9b15a647a56af210dffdc294a57922fb0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 deleted file mode 100644 index 7a36dc1f2724f..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -384ba4d75670befbedb45c4d3b497a93639c206d \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..45ea27d29a183 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +9984cbd6e5d55c768f198e975d8aaf7fd42a4602 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1 deleted file mode 100644 index 37b78a32f741f..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af78acec783ffd77c63d8aeecc21041fd39ac54f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..6bb7fcd68b272 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +7142095066eaebd5f29b88c41af7b383b6a953f6 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 deleted file mode 100644 index 1bdfec3aae6ba..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7cceacaf11df8dc63f23d0fb58e9d4640fc88404 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..d53adfa649f5f --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +278f6dfa49d6bd75c40ae1470eb165716f87dce0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 deleted file mode 100644 index 8b7b50a6fc9c6..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.97.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abb86c6906bf512bf2b797a41cd7d2e8d3cd7c36 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 new file mode 100644 index 0000000000000..258f7c957dda0 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.99.Final.jar.sha1 @@ -0,0 +1 @@ +742693761d7ea4c038bccfda96bb38194720b80d \ No newline at end 
of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1
deleted file mode 100644
index 032959e98d009..0000000000000
--- a/plugins/transport-nio/licenses/netty-resolver-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cec8348108dc76c47cf87c669d514be52c922144
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1
new file mode 100644
index 0000000000000..b8bc0a4370f58
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-resolver-4.1.99.Final.jar.sha1
@@ -0,0 +1 @@
+080e45397d9d5b134477de3ffd0f94283b908621
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1
deleted file mode 100644
index 107863c1b3c9d..0000000000000
--- a/plugins/transport-nio/licenses/netty-transport-4.1.97.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f37380d23c9bb079bc702910833b2fd532c9abd0
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1
new file mode 100644
index 0000000000000..247975e0a64c7
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-transport-4.1.99.Final.jar.sha1
@@ -0,0 +1 @@
+9ca2e3ae19a6713b749df154622115f480b6716c
\ No newline at end of file

From 3a790c150e6a8b45d385341e276da6f226489515 Mon Sep 17 00:00:00 2001
From: Jay Deng
Date: Mon, 2 Oct 2023 22:37:17 -0700
Subject: [PATCH 06/12] Disable concurrent search when terminate_after is used
 (#10200)

Signed-off-by: Jay Deng
---
 CHANGELOG.md                                  |   1 +
 .../simple/ParameterizedSimpleSearchIT.java   | 608 -----------------
 .../search/simple/SimpleSearchIT.java         | 645 +++++++++++++++++-
 .../search/DefaultSearchContext.java          |   2 +
 4 files changed, 645 insertions(+), 611 deletions(-)
 delete mode 100644 server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9b4e6408f3491..9963eaef31d33 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -120,6 +120,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042))
 - [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122))
 - Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246))
+- Disable concurrent segment search when terminate_after is used ([#10200](https://github.com/opensearch-project/OpenSearch/pull/10200))
 
 ### Deprecated
 
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java
deleted file mode 100644
index 719b75079da92..0000000000000
--- a/server/src/internalClusterTest/java/org/opensearch/search/simple/ParameterizedSimpleSearchIT.java
+++ /dev/null
@@ -1,608 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.search.simple; - -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - -import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.action.search.SearchPhaseExecutionException; -import org.opensearch.action.search.SearchRequestBuilder; -import org.opensearch.action.search.SearchResponse; -import org.opensearch.action.support.WriteRequest.RefreshPolicy; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.core.xcontent.MediaTypeRegistry; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.query.QueryBuilders; -import org.opensearch.index.query.TermQueryBuilder; -import org.opensearch.search.rescore.QueryRescorerBuilder; -import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.ExecutionException; - -import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; -import static org.opensearch.index.query.QueryBuilders.boolQuery; -import static org.opensearch.index.query.QueryBuilders.matchAllQuery; -import static org.opensearch.index.query.QueryBuilders.queryStringQuery; -import static org.opensearch.index.query.QueryBuilders.rangeQuery; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; - -public class ParameterizedSimpleSearchIT extends ParameterizedOpenSearchIntegTestCase { - - public ParameterizedSimpleSearchIT(Settings settings) { - super(settings); - } - - @ParametersFactory - public static Collection parameters() { - return Arrays.asList( - new 
Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } - ); - } - - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - - public void testSearchNullIndex() { - expectThrows( - NullPointerException.class, - () -> client().prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() - ); - - expectThrows( - NullPointerException.class, - () -> client().prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() - ); - - } - - public void testSearchRandomPreference() throws InterruptedException, ExecutionException { - createIndex("test"); - indexRandom( - true, - client().prepareIndex("test").setId("1").setSource("field", "value"), - client().prepareIndex("test").setId("2").setSource("field", "value"), - client().prepareIndex("test").setId("3").setSource("field", "value"), - client().prepareIndex("test").setId("4").setSource("field", "value"), - client().prepareIndex("test").setId("5").setSource("field", "value"), - client().prepareIndex("test").setId("6").setSource("field", "value") - ); - - int iters = scaledRandomIntBetween(10, 20); - for (int i = 0; i < iters; i++) { - String randomPreference = randomUnicodeOfLengthBetween(0, 4); - // randomPreference should not start with '_' (reserved for known preference types (e.g. _shards, _primary) - while (randomPreference.startsWith("_")) { - randomPreference = randomUnicodeOfLengthBetween(0, 4); - } - // id is not indexed, but lets see that we automatically convert to - SearchResponse searchResponse = client().prepareSearch() - .setQuery(QueryBuilders.matchAllQuery()) - .setPreference(randomPreference) - .get(); - assertHitCount(searchResponse, 6L); - - } - } - - public void testSimpleIp() throws Exception { - createIndex("test"); - - client().admin() - .indices() - .preparePutMapping("test") - .setSource( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("from") - .field("type", "ip") - .endObject() - .startObject("to") - .field("type", "ip") - .endObject() - .endObject() - .endObject() - .endObject() - ) - .get(); - - client().prepareIndex("test").setId("1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefreshPolicy(IMMEDIATE).get(); - - SearchResponse search = client().prepareSearch() - .setQuery(boolQuery().must(rangeQuery("from").lte("192.168.0.7")).must(rangeQuery("to").gte("192.168.0.7"))) - .get(); - - assertHitCount(search, 1L); - } - - public void testIpCidr() throws Exception { - createIndex("test"); - - client().admin() - .indices() - .preparePutMapping("test") - .setSource( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("ip") - .field("type", "ip") - .endObject() - .endObject() - .endObject() - .endObject() - ) - .get(); - ensureGreen(); - - client().prepareIndex("test").setId("1").setSource("ip", "192.168.0.1").get(); - client().prepareIndex("test").setId("2").setSource("ip", "192.168.0.2").get(); - client().prepareIndex("test").setId("3").setSource("ip", "192.168.0.3").get(); - client().prepareIndex("test").setId("4").setSource("ip", "192.168.1.4").get(); - 
client().prepareIndex("test").setId("5").setSource("ip", "2001:db8::ff00:42:8329").get(); - refresh(); - - SearchResponse search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(queryStringQuery("ip: 192.168.0.1")).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1/32"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.0/24"))).get(); - assertHitCount(search, 3L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.0.0.0/8"))).get(); - assertHitCount(search, 4L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0.0.0.0/0"))).get(); - assertHitCount(search, 4L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::ff00:42:8329/128"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::/64"))).get(); - assertHitCount(search, 1L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "::/0"))).get(); - assertHitCount(search, 5L); - - search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.1.5/32"))).get(); - assertHitCount(search, 0L); - - assertFailures( - client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0/0/0/0/0"))), - RestStatus.BAD_REQUEST, - containsString("Expected [ip/prefix] but was [0/0/0/0/0]") - ); - } - - public void testSimpleId() { - createIndex("test"); - - client().prepareIndex("test").setId("XXX1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - // id is not indexed, but lets see that we automatically convert to - SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")).get(); - assertHitCount(searchResponse, 1L); - - searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")).get(); - assertHitCount(searchResponse, 1L); - } - - public void testSimpleDateRange() throws Exception { - createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field", "2010-01-05T02:00").get(); - client().prepareIndex("test").setId("2").setSource("field", "2010-01-06T02:00").get(); - ensureGreen(); - refresh(); - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d/d")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lte("2010-01-06T02:00")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lt("2010-01-06T02:00")) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gt("2010-01-05T02:00").lt("2010-01-06T02:00")) - .get(); - assertNoFailures(searchResponse); - 
assertHitCount(searchResponse, 0L); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.queryStringQuery("field:[2010-01-03||+2d TO 2010-01-04||+2d/d]")) - .get(); - assertHitCount(searchResponse, 2L); - } - - // TODO: combine this test with SimpleSearchIT.testSimpleTerminateAfterCount after - // https://github.com/opensearch-project/OpenSearch/issues/8371 - public void testSimpleTerminateAfterCountWithSizeAndTrackHits() throws Exception { - prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); - ensureGreen(); - int numDocs = randomIntBetween(15, 29); - List docbuilders = new ArrayList<>(numDocs); - - for (int i = 1; i <= numDocs; i++) { - String id = String.valueOf(i); - docbuilders.add(client().prepareIndex("test").setId(id).setSource("field", i)); - } - - indexRandom(true, docbuilders); - ensureGreen(); - refresh(); - - SearchResponse searchResponse; - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.matchAllQuery()) - .setTerminateAfter(numDocs) - .setSize(0) - .setTrackTotalHits(true) - .get(); - assertEquals(0, searchResponse.getFailedShards()); - } - - public void testSimpleIndexSortEarlyTerminate() throws Exception { - prepareCreate("test").setSettings( - Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).put("index.sort.field", "rank") - ).setMapping("rank", "type=integer").get(); - ensureGreen(); - int max = randomIntBetween(3, 29); - List docbuilders = new ArrayList<>(max); - - for (int i = max - 1; i >= 0; i--) { - String id = String.valueOf(i); - docbuilders.add(client().prepareIndex("test").setId(id).setSource("rank", i)); - } - - indexRandom(true, docbuilders); - ensureGreen(); - refresh(); - - SearchResponse searchResponse; - for (int i = 1; i < max; i++) { - searchResponse = client().prepareSearch("test") - .addDocValueField("rank") - .setTrackTotalHits(false) - .addSort("rank", SortOrder.ASC) - .setSize(i) - .get(); - assertNull(searchResponse.getHits().getTotalHits()); - for (int j = 0; j < i; j++) { - assertThat(searchResponse.getHits().getAt(j).field("rank").getValue(), equalTo((long) j)); - } - } - } - - public void testInsaneFromAndSize() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertWindowFails(client().prepareSearch("idx").setFrom(Integer.MAX_VALUE)); - assertWindowFails(client().prepareSearch("idx").setSize(Integer.MAX_VALUE)); - } - - public void testTooLargeFromAndSize() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertWindowFails(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))); - assertWindowFails(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1)); - assertWindowFails( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - ); - } - - public void testLargeFromAndSizeSucceeds() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - 10).get(), 1); - 
assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2 - 1) - .get(), - 1 - ); - } - - public void testTooLargeFromAndSizeOkBySetting() throws Exception { - prepareCreate("idx").setSettings( - Settings.builder() - .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2) - ).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .get(), - 1 - ); - } - - public void testTooLargeFromAndSizeOkByDynamicSetting() throws Exception { - createIndex("idx"); - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings("idx") - .setSettings( - Settings.builder() - .put( - IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), - IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2 - ) - ) - .get() - ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .get(), - 1 - ); - } - - public void testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws Exception { - prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), Integer.MAX_VALUE)).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); - assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); - assertHitCount( - client().prepareSearch("idx") - .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) - .get(), - 1 - ); - } - - public void testTooLargeRescoreWindow() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertRescoreWindowFails(Integer.MAX_VALUE); - assertRescoreWindowFails(IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + 1); - } - - public void testTooLargeRescoreOkBySetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) - .get(); - 
indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testTooLargeRescoreOkByResultWindowSetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - prepareCreate("idx").setSettings( - Settings.builder() - .put( - IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), // Note that this is the RESULT window. - defaultMaxWindow * 2 - ) - ).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testTooLargeRescoreOkByDynamicSetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - createIndex("idx"); - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings("idx") - .setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) - .get() - ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testTooLargeRescoreOkByDynamicResultWindowSetting() throws Exception { - int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); - createIndex("idx"); - assertAcked( - client().admin() - .indices() - .prepareUpdateSettings("idx") - .setSettings( - // Note that this is the RESULT window - Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), defaultMaxWindow * 2) - ) - .get() - ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - assertHitCount( - client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), - 1 - ); - } - - public void testQueryNumericFieldWithRegex() throws Exception { - assertAcked(prepareCreate("idx").setMapping("num", "type=integer")); - ensureGreen("idx"); - - try { - client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", "34")).get(); - fail("SearchPhaseExecutionException should have been thrown"); - } catch (SearchPhaseExecutionException ex) { - assertThat(ex.getRootCause().getMessage(), containsString("Can only use regexp queries on keyword and text fields")); - } - } - - public void testTermQueryBigInt() throws Exception { - prepareCreate("idx").setMapping("field", "type=keyword").get(); - ensureGreen("idx"); - - client().prepareIndex("idx") - .setId("1") - .setSource("{\"field\" : 80315953321748200608 }", MediaTypeRegistry.JSON) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) - .get(); - - String queryJson = "{ \"field\" : { \"value\" : 80315953321748200608 } }"; - XContentParser parser = createParser(JsonXContent.jsonXContent, queryJson); - parser.nextToken(); - TermQueryBuilder query = TermQueryBuilder.fromXContent(parser); - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(query).get(); - assertEquals(1, searchResponse.getHits().getTotalHits().value); - } - - public void testTooLongRegexInRegexpQuery() throws Exception { - createIndex("idx"); - indexRandom(true, 
client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); - - int defaultMaxRegexLength = IndexSettings.MAX_REGEX_LENGTH_SETTING.get(Settings.EMPTY); - StringBuilder regexp = new StringBuilder(defaultMaxRegexLength); - while (regexp.length() <= defaultMaxRegexLength) { - regexp.append("]\\r\\\\]|\\\\.)*\\](?:(?:\\r\\n)?[\\t])*))*(?:,@(?:(?:\\r\\n)?[ \\t])*(?:[^()<>@,;:\\\\\".\\"); - } - SearchPhaseExecutionException e = expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", regexp.toString())).get() - ); - assertThat( - e.getRootCause().getMessage(), - containsString( - "The length of regex [" - + regexp.length() - + "] used in the Regexp Query request has exceeded " - + "the allowed maximum of [" - + defaultMaxRegexLength - + "]. " - + "This maximum can be set by changing the [" - + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey() - + "] index level setting." - ) - ); - } - - private void assertWindowFails(SearchRequestBuilder search) { - SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); - assertThat( - e.toString(), - containsString( - "Result window is too large, from + size must be less than or equal to: [" - + IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - ) - ); - assertThat(e.toString(), containsString("See the scroll api for a more efficient way to request large data sets")); - } - - private void assertRescoreWindowFails(int windowSize) { - SearchRequestBuilder search = client().prepareSearch("idx") - .addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(windowSize)); - SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); - assertThat( - e.toString(), - containsString( - "Rescore window [" - + windowSize - + "] is too large. It must " - + "be less than [" - + IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) - ) - ); - assertThat( - e.toString(), - containsString( - "This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + "] index level setting." - ) - ); - } -} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java index 67e460653245e..95b36311f6b8b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java @@ -6,24 +6,283 @@ * compatible open source license. */ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + package org.opensearch.search.simple; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.WriteRequest.RefreshPolicy; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.rescore.QueryRescorerBuilder; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; +import java.util.concurrent.ExecutionException; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.index.query.QueryBuilders.boolQuery; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.index.query.QueryBuilders.queryStringQuery; +import static org.opensearch.index.query.QueryBuilders.rangeQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.apache.lucene.search.TotalHits.Relation.EQUAL_TO; +import static org.apache.lucene.search.TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; + +public class SimpleSearchIT extends ParameterizedOpenSearchIntegTestCase { + + public SimpleSearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + + public void testSearchNullIndex() { + expectThrows( + NullPointerException.class, + () -> client().prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() + ); + + expectThrows( + NullPointerException.class, + () -> client().prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", 
"XXX1")).get() + ); + + } + + public void testSearchRandomPreference() throws InterruptedException, ExecutionException { + createIndex("test"); + indexRandom( + true, + client().prepareIndex("test").setId("1").setSource("field", "value"), + client().prepareIndex("test").setId("2").setSource("field", "value"), + client().prepareIndex("test").setId("3").setSource("field", "value"), + client().prepareIndex("test").setId("4").setSource("field", "value"), + client().prepareIndex("test").setId("5").setSource("field", "value"), + client().prepareIndex("test").setId("6").setSource("field", "value") + ); + + int iters = scaledRandomIntBetween(10, 20); + for (int i = 0; i < iters; i++) { + String randomPreference = randomUnicodeOfLengthBetween(0, 4); + // randomPreference should not start with '_' (reserved for known preference types (e.g. _shards, _primary) + while (randomPreference.startsWith("_")) { + randomPreference = randomUnicodeOfLengthBetween(0, 4); + } + // id is not indexed, but lets see that we automatically convert to + SearchResponse searchResponse = client().prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .setPreference(randomPreference) + .get(); + assertHitCount(searchResponse, 6L); + + } + } + + public void testSimpleIp() throws Exception { + createIndex("test"); + + client().admin() + .indices() + .preparePutMapping("test") + .setSource( + XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("from") + .field("type", "ip") + .endObject() + .startObject("to") + .field("type", "ip") + .endObject() + .endObject() + .endObject() + .endObject() + ) + .get(); + + client().prepareIndex("test").setId("1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefreshPolicy(IMMEDIATE).get(); + + SearchResponse search = client().prepareSearch() + .setQuery(boolQuery().must(rangeQuery("from").lte("192.168.0.7")).must(rangeQuery("to").gte("192.168.0.7"))) + .get(); + + assertHitCount(search, 1L); + } + + public void testIpCidr() throws Exception { + createIndex("test"); + + client().admin() + .indices() + .preparePutMapping("test") + .setSource( + XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("ip") + .field("type", "ip") + .endObject() + .endObject() + .endObject() + .endObject() + ) + .get(); + ensureGreen(); + + client().prepareIndex("test").setId("1").setSource("ip", "192.168.0.1").get(); + client().prepareIndex("test").setId("2").setSource("ip", "192.168.0.2").get(); + client().prepareIndex("test").setId("3").setSource("ip", "192.168.0.3").get(); + client().prepareIndex("test").setId("4").setSource("ip", "192.168.1.4").get(); + client().prepareIndex("test").setId("5").setSource("ip", "2001:db8::ff00:42:8329").get(); + refresh(); + + SearchResponse search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(queryStringQuery("ip: 192.168.0.1")).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1/32"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.0/24"))).get(); + assertHitCount(search, 3L); -public class SimpleSearchIT extends OpenSearchIntegTestCase { + search = 
client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.0.0.0/8"))).get(); + assertHitCount(search, 4L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0.0.0.0/0"))).get(); + assertHitCount(search, 4L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::ff00:42:8329/128"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::/64"))).get(); + assertHitCount(search, 1L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "::/0"))).get(); + assertHitCount(search, 5L); + + search = client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.1.5/32"))).get(); + assertHitCount(search, 0L); + + assertFailures( + client().prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0/0/0/0/0"))), + RestStatus.BAD_REQUEST, + containsString("Expected [ip/prefix] but was [0/0/0/0/0]") + ); + } + + public void testSimpleId() { + createIndex("test"); + + client().prepareIndex("test").setId("XXX1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + // _id is not indexed as a regular field, but the term query should be automatically converted to an _id lookup + SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")).get(); + assertHitCount(searchResponse, 1L); + + searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")).get(); + assertHitCount(searchResponse, 1L); + } + + public void testSimpleDateRange() throws Exception { + createIndex("test"); + client().prepareIndex("test").setId("1").setSource("field", "2010-01-05T02:00").get(); + client().prepareIndex("test").setId("2").setSource("field", "2010-01-06T02:00").get(); + ensureGreen(); + refresh(); + SearchResponse searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d/d")) + .get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 2L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lte("2010-01-06T02:00")) + .get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 2L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lt("2010-01-06T02:00")) + .get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 1L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gt("2010-01-05T02:00").lt("2010-01-06T02:00")) + .get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 0L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.queryStringQuery("field:[2010-01-03||+2d TO 2010-01-04||+2d/d]")) + .get(); + assertHitCount(searchResponse, 2L); + } - // TODO: Move this test to ParameterizedSimpleSearchIT after https://github.com/opensearch-project/OpenSearch/issues/8371 public void testSimpleTerminateAfterCount() throws Exception { prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); ensureGreen(); @@ -40,13 +299,18 @@ public void testSimpleTerminateAfterCount() throws Exception { refresh(); SearchResponse searchResponse; + int size; for (int i = 1; i < max; 
i++) { + size = randomIntBetween(0, max); searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)) .setTerminateAfter(i) + .setSize(size) + .setTrackTotalHits(true) .get(); assertHitCount(searchResponse, i); assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(Math.min(i, size), searchResponse.getHits().getHits().length); } searchResponse = client().prepareSearch("test") @@ -57,4 +321,379 @@ public void testSimpleTerminateAfterCount() throws Exception { assertHitCount(searchResponse, max); assertFalse(searchResponse.isTerminatedEarly()); } + + public void testSimpleTerminateAfterTrackTotalHitsUpTo() throws Exception { + prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); + ensureGreen(); + int numDocs = 29; + List<IndexRequestBuilder> docbuilders = new ArrayList<>(numDocs); + + for (int i = 1; i <= numDocs; i++) { + String id = String.valueOf(i); + docbuilders.add(client().prepareIndex("test").setId(id).setSource("field", i)); + } + + indexRandom(true, docbuilders); + ensureGreen(); + refresh(); + + // size=0 is a special case where topDocsCollector is not added + int size = randomIntBetween(0, 1); + SearchResponse searchResponse; + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(10) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(GREATER_THAN_OR_EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHitsUpTo(10) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHits(true) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(numDocs * 2) + .setSize(size) + .setTrackTotalHits(true) + .get(); + assertFalse(searchResponse.isTerminatedEarly()); + assertEquals(numDocs, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(GREATER_THAN_OR_EQUAL_TO, 
searchResponse.getHits().getTotalHits().relation); + } + + public void testSimpleIndexSortEarlyTerminate() throws Exception { + prepareCreate("test").setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).put("index.sort.field", "rank") + ).setMapping("rank", "type=integer").get(); + ensureGreen(); + int max = randomIntBetween(3, 29); + List<IndexRequestBuilder> docbuilders = new ArrayList<>(max); + + for (int i = max - 1; i >= 0; i--) { + String id = String.valueOf(i); + docbuilders.add(client().prepareIndex("test").setId(id).setSource("rank", i)); + } + + indexRandom(true, docbuilders); + ensureGreen(); + refresh(); + + SearchResponse searchResponse; + for (int i = 1; i < max; i++) { + searchResponse = client().prepareSearch("test") + .addDocValueField("rank") + .setTrackTotalHits(false) + .addSort("rank", SortOrder.ASC) + .setSize(i) + .get(); + assertNull(searchResponse.getHits().getTotalHits()); + for (int j = 0; j < i; j++) { + assertThat(searchResponse.getHits().getAt(j).field("rank").getValue(), equalTo((long) j)); + } + } + } + + public void testInsaneFromAndSize() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertWindowFails(client().prepareSearch("idx").setFrom(Integer.MAX_VALUE)); + assertWindowFails(client().prepareSearch("idx").setSize(Integer.MAX_VALUE)); + } + + public void testTooLargeFromAndSize() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertWindowFails(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))); + assertWindowFails(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1)); + assertWindowFails( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + ); + } + + public void testLargeFromAndSizeSucceeds() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - 10).get(), 1); + assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); + assertHitCount( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2 - 1) + .get(), + 1 + ); + } + + public void testTooLargeFromAndSizeOkBySetting() throws Exception { + prepareCreate("idx").setSettings( + Settings.builder() + .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2) + ).get(); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); + assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); + assertHitCount( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .get(), + 1 + ); + } + + public 
void testTooLargeFromAndSizeOkByDynamicSetting() throws Exception { + createIndex("idx"); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("idx") + .setSettings( + Settings.builder() + .put( + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), + IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2 + ) + ) + .get() + ); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); + assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); + assertHitCount( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) + .get(), + 1 + ); + } + + public void testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws Exception { + prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), Integer.MAX_VALUE)).get(); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); + assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); + assertHitCount( + client().prepareSearch("idx") + .setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) + .get(), + 1 + ); + } + + public void testTooLargeRescoreWindow() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertRescoreWindowFails(Integer.MAX_VALUE); + assertRescoreWindowFails(IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + 1); + } + + public void testTooLargeRescoreOkBySetting() throws Exception { + int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); + prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) + .get(); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount( + client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), + 1 + ); + } + + public void testTooLargeRescoreOkByResultWindowSetting() throws Exception { + int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); + prepareCreate("idx").setSettings( + Settings.builder() + .put( + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), // Note that this is the RESULT window. 
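+ // max_rescore_window appears to default to max_result_window, so raising only the result window also lifts the rescore limit exercised here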
+ defaultMaxWindow * 2 + ) + ).get(); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount( + client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), + 1 + ); + } + + public void testTooLargeRescoreOkByDynamicSetting() throws Exception { + int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); + createIndex("idx"); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("idx") + .setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) + .get() + ); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount( + client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), + 1 + ); + } + + public void testTooLargeRescoreOkByDynamicResultWindowSetting() throws Exception { + int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); + createIndex("idx"); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("idx") + .setSettings( + // Note that this is the RESULT window + Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), defaultMaxWindow * 2) + ) + .get() + ); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + assertHitCount( + client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), + 1 + ); + } + + public void testQueryNumericFieldWithRegex() throws Exception { + assertAcked(prepareCreate("idx").setMapping("num", "type=integer")); + ensureGreen("idx"); + + try { + client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", "34")).get(); + fail("SearchPhaseExecutionException should have been thrown"); + } catch (SearchPhaseExecutionException ex) { + assertThat(ex.getRootCause().getMessage(), containsString("Can only use regexp queries on keyword and text fields")); + } + } + + public void testTermQueryBigInt() throws Exception { + prepareCreate("idx").setMapping("field", "type=keyword").get(); + ensureGreen("idx"); + + client().prepareIndex("idx") + .setId("1") + .setSource("{\"field\" : 80315953321748200608 }", MediaTypeRegistry.JSON) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); + + String queryJson = "{ \"field\" : { \"value\" : 80315953321748200608 } }"; + XContentParser parser = createParser(JsonXContent.jsonXContent, queryJson); + parser.nextToken(); + TermQueryBuilder query = TermQueryBuilder.fromXContent(parser); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(query).get(); + assertEquals(1, searchResponse.getHits().getTotalHits().value); + } + + public void testTooLongRegexInRegexpQuery() throws Exception { + createIndex("idx"); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); + + int defaultMaxRegexLength = IndexSettings.MAX_REGEX_LENGTH_SETTING.get(Settings.EMPTY); + StringBuilder regexp = new StringBuilder(defaultMaxRegexLength); + while (regexp.length() <= defaultMaxRegexLength) { + regexp.append("]\\r\\\\]|\\\\.)*\\](?:(?:\\r\\n)?[\\t])*))*(?:,@(?:(?:\\r\\n)?[ \\t])*(?:[^()<>@,;:\\\\\".\\"); + } + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", 
regexp.toString())).get() + ); + assertThat( + e.getRootCause().getMessage(), + containsString( + "The length of regex [" + + regexp.length() + + "] used in the Regexp Query request has exceeded " + + "the allowed maximum of [" + + defaultMaxRegexLength + + "]. " + + "This maximum can be set by changing the [" + + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey() + + "] index level setting." + ) + ); + } + + private void assertWindowFails(SearchRequestBuilder search) { + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); + assertThat( + e.toString(), + containsString( + "Result window is too large, from + size must be less than or equal to: [" + + IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + ) + ); + assertThat(e.toString(), containsString("See the scroll api for a more efficient way to request large data sets")); + } + + private void assertRescoreWindowFails(int windowSize) { + SearchRequestBuilder search = client().prepareSearch("idx") + .addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(windowSize)); + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); + assertThat( + e.toString(), + containsString( + "Rescore window [" + + windowSize + + "] is too large. It must " + + "be less than [" + + IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + ) + ); + assertThat( + e.toString(), + containsString( + "This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + "] index level setting." + ) + ); + } } diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index 28931bb5a860f..960b46d68977b 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -896,6 +896,8 @@ public void evaluateRequestShouldUseConcurrentSearch() { && aggregations().factories() != null && !aggregations().factories().allFactoriesSupportConcurrentSearch()) { requestShouldUseConcurrentSearch.set(false); + } else if (terminateAfter != DEFAULT_TERMINATE_AFTER) { + requestShouldUseConcurrentSearch.set(false); } else { requestShouldUseConcurrentSearch.set(true); } From 2df9eed412c93c52ac404d6358bdc35685ee853f Mon Sep 17 00:00:00 2001 From: rishavz_sagar Date: Tue, 3 Oct 2023 16:58:31 +0530 Subject: [PATCH 07/12] Add unreferenced file cleanup count to merge stats (#10204) Signed-off-by: Rishav Sagar Co-authored-by: Rishav Sagar --- CHANGELOG.md | 1 + .../org/opensearch/index/engine/Engine.java | 12 +++++++++- .../opensearch/index/merge/MergeStats.java | 22 +++++++++++++++++-- .../opensearch/index/shard/IndexShard.java | 4 +++- .../index/engine/InternalEngineTests.java | 10 ++++++++- 5 files changed, 44 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9963eaef31d33..97a1386315dc7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -89,6 +89,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) - Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189)) - Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562)) +- Add unreferenced file 
cleanup count to merge stats ([#10204](https://github.com/opensearch-project/OpenSearch/pull/10204)) ### Dependencies - Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) diff --git a/server/src/main/java/org/opensearch/index/engine/Engine.java b/server/src/main/java/org/opensearch/index/engine/Engine.java index a8f2f60f8cf12..9b0e1b70a8e05 100644 --- a/server/src/main/java/org/opensearch/index/engine/Engine.java +++ b/server/src/main/java/org/opensearch/index/engine/Engine.java @@ -145,6 +145,7 @@ public abstract class Engine implements LifecycleAware, Closeable { protected final EngineConfig engineConfig; protected final Store store; protected final AtomicBoolean isClosed = new AtomicBoolean(false); + private final CounterMetric totalUnreferencedFileCleanUpsPerformed = new CounterMetric(); private final CountDownLatch closedLatch = new CountDownLatch(1); protected final EventListener eventListener; protected final ReentrantLock failEngineLock = new ReentrantLock(); @@ -267,6 +268,13 @@ protected final DocsStats docsStats(IndexReader indexReader) { return new DocsStats(numDocs, numDeletedDocs, sizeInBytes); } + /** + * Returns the unreferenced file cleanup count for this engine. + */ + public long unreferencedFileCleanUpsPerformed() { + return totalUnreferencedFileCleanUpsPerformed.count(); + } + /** * Performs the pre-closing checks on the {@link Engine}. * @@ -1340,7 +1348,9 @@ private void cleanUpUnreferencedFiles() { .setOpenMode(IndexWriterConfig.OpenMode.APPEND) ) ) { - // do nothing and close this will kick off IndexFileDeleter which will remove all unreferenced files. + // do nothing except incrementing the metric count; closing the writer kicks off IndexFileDeleter, which + // removes all unreferenced files + totalUnreferencedFileCleanUpsPerformed.inc(); } catch (Exception ex) { logger.error("Error while deleting unreferenced file ", ex); } diff --git a/server/src/main/java/org/opensearch/index/merge/MergeStats.java b/server/src/main/java/org/opensearch/index/merge/MergeStats.java index 37fdca8871b18..fc5bac24a60d6 100644 --- a/server/src/main/java/org/opensearch/index/merge/MergeStats.java +++ b/server/src/main/java/org/opensearch/index/merge/MergeStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.merge; +import org.opensearch.Version; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -65,9 +66,9 @@ public class MergeStats implements Writeable, ToXContentFragment { private long totalBytesPerSecAutoThrottle; - public MergeStats() { + private long unreferencedFileCleanUpsPerformed; - } + public MergeStats() {} public MergeStats(StreamInput in) throws IOException { total = in.readVLong(); @@ -81,6 +82,9 @@ public MergeStats(StreamInput in) throws IOException { totalStoppedTimeInMillis = in.readVLong(); totalThrottledTimeInMillis = in.readVLong(); totalBytesPerSecAutoThrottle = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + unreferencedFileCleanUpsPerformed = in.readOptionalVLong(); + } } public void add( @@ -133,6 +137,7 @@ public void addTotals(MergeStats mergeStats) { this.totalSizeInBytes += mergeStats.totalSizeInBytes; this.totalStoppedTimeInMillis += mergeStats.totalStoppedTimeInMillis; this.totalThrottledTimeInMillis += mergeStats.totalThrottledTimeInMillis; + addUnreferencedFileCleanUpStats(mergeStats.unreferencedFileCleanUpsPerformed); if 
(this.totalBytesPerSecAutoThrottle == Long.MAX_VALUE || mergeStats.totalBytesPerSecAutoThrottle == Long.MAX_VALUE) { this.totalBytesPerSecAutoThrottle = Long.MAX_VALUE; } else { @@ -140,6 +145,14 @@ public void addTotals(MergeStats mergeStats) { } } + public void addUnreferencedFileCleanUpStats(long unreferencedFileCleanUpsPerformed) { + this.unreferencedFileCleanUpsPerformed += unreferencedFileCleanUpsPerformed; + } + + public long getUnreferencedFileCleanUpsPerformed() { + return this.unreferencedFileCleanUpsPerformed; + } + /** * The total number of merges executed. */ @@ -240,6 +253,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.TOTAL_THROTTLE_BYTES_PER_SEC).value(new ByteSizeValue(totalBytesPerSecAutoThrottle).toString()); } builder.field(Fields.TOTAL_THROTTLE_BYTES_PER_SEC_IN_BYTES, totalBytesPerSecAutoThrottle); + builder.field(Fields.UNREFERENCED_FILE_CLEANUPS_PERFORMED, unreferencedFileCleanUpsPerformed); builder.endObject(); return builder; } @@ -267,6 +281,7 @@ static final class Fields { static final String TOTAL_SIZE_IN_BYTES = "total_size_in_bytes"; static final String TOTAL_THROTTLE_BYTES_PER_SEC_IN_BYTES = "total_auto_throttle_in_bytes"; static final String TOTAL_THROTTLE_BYTES_PER_SEC = "total_auto_throttle"; + static final String UNREFERENCED_FILE_CLEANUPS_PERFORMED = "unreferenced_file_cleanups_performed"; } @Override @@ -282,5 +297,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(totalStoppedTimeInMillis); out.writeVLong(totalThrottledTimeInMillis); out.writeVLong(totalBytesPerSecAutoThrottle); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalVLong(unreferencedFileCleanUpsPerformed); + } } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 5ce066b156775..bc9d839624740 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -1383,7 +1383,9 @@ public MergeStats mergeStats() { if (engine == null) { return new MergeStats(); } - return engine.getMergeStats(); + final MergeStats mergeStats = engine.getMergeStats(); + mergeStats.addUnreferencedFileCleanUpStats(engine.unreferencedFileCleanUpsPerformed()); + return mergeStats; } public SegmentsStats segmentStats(boolean includeSegmentFileSizes, boolean includeUnloadedSegments) { diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index e3d77d45861ac..efa13438cffcf 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -233,7 +233,6 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -3334,6 +3333,9 @@ public void onFailedEngine(String reason, Exception e) { ); assertTrue(cleanupCompleted.await(10, TimeUnit.SECONDS)); + // Cleanup count will be incremented whenever cleanup is performed correctly. 
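+ // The count comes from the engine's CounterMetric and is read via unreferencedFileCleanUpsPerformed()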
+ long unreferencedFileCleanUpsPerformed = engine.unreferencedFileCleanUpsPerformed(); + assertThat(unreferencedFileCleanUpsPerformed, equalTo(1L)); } catch (Exception ex) { throw new AssertionError(ex); } @@ -3445,6 +3447,9 @@ public void onFailedEngine(String reason, Exception e) { ); assertTrue(cleanupCompleted.await(10, TimeUnit.SECONDS)); + // Cleanup count will not be incremented whenever cleanup is disabled. + long unreferencedFileCleanUpsPerformed = engine.unreferencedFileCleanUpsPerformed(); + assertThat(unreferencedFileCleanUpsPerformed, equalTo(0L)); } catch (Exception ex) { throw new AssertionError(ex); } @@ -3549,6 +3554,9 @@ public void onFailedEngine(String reason, Exception e) { ); assertTrue(cleanupCompleted.await(10, TimeUnit.SECONDS)); + // Cleanup count will not be incremented whenever there is some issue with cleanup. + long unreferencedFileCleanUpsPerformed = engine.unreferencedFileCleanUpsPerformed(); + assertThat(unreferencedFileCleanUpsPerformed, equalTo(0L)); } catch (Exception ex) { throw new AssertionError(ex); } From 20989755b2660ce9674fe2d2d95b125a6ffbfc7d Mon Sep 17 00:00:00 2001 From: Jongwoo Han Date: Tue, 3 Oct 2023 20:51:34 +0900 Subject: [PATCH 08/12] Replace deprecated command with environment file (#10289) Signed-off-by: Jongwoo Han --- .github/workflows/lucene-snapshots.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lucene-snapshots.yml b/.github/workflows/lucene-snapshots.yml index c2a2cedaaefb4..76981276fe085 100644 --- a/.github/workflows/lucene-snapshots.yml +++ b/.github/workflows/lucene-snapshots.yml @@ -38,7 +38,7 @@ jobs: - name: Set hash working-directory: ./lucene run: | - echo "::set-output name=REVISION::$(git rev-parse --short HEAD)" + echo "REVISION=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT id: version - name: Initialize gradle settings From fe8068208c6243f1d6b1c22607461d6aa6f6893e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 10:27:53 -0400 Subject: [PATCH 09/12] Bump peter-evans/create-pull-request from 3 to 5 (#10301) * Bump peter-evans/create-pull-request from 3 to 5 Bumps [peter-evans/create-pull-request](https://github.com/peter-evans/create-pull-request) from 3 to 5. - [Release notes](https://github.com/peter-evans/create-pull-request/releases) - [Commits](https://github.com/peter-evans/create-pull-request/compare/v3...v5) --- updated-dependencies: - dependency-name: peter-evans/create-pull-request dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- .github/workflows/version.yml | 6 +++--- CHANGELOG.md | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index df785bcc70014..a20c671c137b2 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -59,7 +59,7 @@ jobs: sed -i "s/CURRENT = $CURRENT_VERSION_UNDERSCORE;/CURRENT = $NEXT_VERSION_UNDERSCORE;/g" libs/core/src/main/java/org/opensearch/Version.java - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + uses: peter-evans/create-pull-request@v5 with: token: ${{ steps.github_app_token.outputs.token }} base: ${{ env.BASE }} @@ -86,7 +86,7 @@ jobs: sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + uses: peter-evans/create-pull-request@v5 with: token: ${{ steps.github_app_token.outputs.token }} base: ${{ env.BASE_X }} @@ -113,7 +113,7 @@ jobs: sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + uses: peter-evans/create-pull-request@v5 with: token: ${{ steps.github_app_token.outputs.token }} base: main diff --git a/CHANGELOG.md b/CHANGELOG.md index 97a1386315dc7..a6b9ff4eacfa2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -112,6 +112,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump Lucene from 9.7.0 to 9.8.0 ([#10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) - Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) - Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) +- Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301)) ### Changed - Add instrumentation in rest and network layer. 
([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) @@ -139,4 +140,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x \ No newline at end of file From e7909fc45da3b4e60ea18a0b37563050d3af1d54 Mon Sep 17 00:00:00 2001 From: Bhumika Saini Date: Tue, 3 Oct 2023 20:07:08 +0530 Subject: [PATCH 10/12] Add TODO for updating public documentation (#10242) Signed-off-by: Bhumika Saini --- .github/pull_request_template.md | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 69616e533d1ed..cd7c1bb980eec 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -19,6 +19,7 @@ Resolves #[Issue number to be closed when this PR is merged] - [ ] New functionality has javadoc added - [ ] Commits are signed per the DCO using --signoff - [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) +- [ ] GitHub issue/PR created in [OpenSearch documentation repo](https://github.com/opensearch-project/documentation-website) for the required public documentation changes (#[Issue/PR number]) By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. For more information on following Developer Certificate of Origin and signing off your commits, please check [here](https://github.com/opensearch-project/OpenSearch/blob/main/CONTRIBUTING.md#developer-certificate-of-origin). From 5aa18636706597351aacfa7fc51364cd54ba2c04 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 11:11:27 -0400 Subject: [PATCH 11/12] Bump com.netflix.nebula.ospackage-base from 11.4.0 to 11.5.0 in /distribution/packages (#10295) * Bump com.netflix.nebula.ospackage-base in /distribution/packages Bumps com.netflix.nebula.ospackage-base from 11.4.0 to 11.5.0. --- updated-dependencies: - dependency-name: com.netflix.nebula.ospackage-base dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: Andriy Redko Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Andriy Redko --- CHANGELOG.md | 9 +++++---- distribution/packages/build.gradle | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a6b9ff4eacfa2..8a37866012ba1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -106,10 +106,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.4 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206)) - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.0 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208)) - Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) -- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump Lucene from 9.7.0 to 9.8.0 ([#10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` +- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295)) - Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) - Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) - Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301)) diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 262ad6c802bbb..cb05661dc74a4 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.4.0" + id "com.netflix.nebula.ospackage-base" version "11.5.0" } void addProcessFilesTask(String type, boolean jdk) { From bddf0d3762f55d959ae76f3dea06259e7e795ebe Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 3 Oct 2023 16:07:34 -0400 Subject: [PATCH 12/12] Fix the HttpChannel trace termination in case of exceptions (#10325) Signed-off-by: Andriy Redko --- .../org/opensearch/telemetry/tracing/DefaultTracer.java | 5 ++++- .../org/opensearch/http/AbstractHttpServerTransport.java | 1 + server/src/main/java/org/opensearch/http/HttpChannel.java | 5 +++++ .../telemetry/tracing/channels/TraceableHttpChannel.java | 7 +++++++ 
.../tracing/handler/TraceableTransportResponseHandler.java | 1 + .../org/opensearch/transport/TransportResponseHandler.java | 2 +- 6 files changed, 19 insertions(+), 2 deletions(-) diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java index d3c28b3a9cb5e..79b7e4aca6c2f 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java @@ -25,7 +25,10 @@ */ @InternalApi class DefaultTracer implements Tracer { - static final String THREAD_NAME = "th_name"; + /** + * Current thread name. + */ + static final String THREAD_NAME = "thread.name"; private final TracingTelemetry tracingTelemetry; private final TracerContextStorage<String, Span> tracerContextStorage; diff --git a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java index ed44102d0abe4..b8f8abb6c2c23 100644 --- a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java @@ -298,6 +298,7 @@ static int resolvePublishPort(Settings settings, List<TransportAddress> boundAdd } public void onException(HttpChannel channel, Exception e) { + channel.handleException(e); if (lifecycle.started() == false) { // just close and ignore - we are already stopped and just need to make sure we release all resources CloseableChannel.closeChannel(channel); diff --git a/server/src/main/java/org/opensearch/http/HttpChannel.java b/server/src/main/java/org/opensearch/http/HttpChannel.java index 99aaed23c69b8..6dcdaf9034413 100644 --- a/server/src/main/java/org/opensearch/http/HttpChannel.java +++ b/server/src/main/java/org/opensearch/http/HttpChannel.java @@ -43,6 +43,11 @@ * @opensearch.internal */ public interface HttpChannel extends CloseableChannel { + /** + * Notifies the HTTP channel that an exception has occurred and that the response may not be sent (for example, on timeout) + * @param ex the exception being raised + */ + default void handleException(Exception ex) {} /** * Sends an http response to the channel. 
The listener will be executed once the send process has been diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java index 9229d334dea01..03848e8e58207 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java @@ -56,6 +56,13 @@ public static HttpChannel create(HttpChannel delegate, Span span, Tracer tracer) } } + @Override + public void handleException(Exception ex) { + span.addEvent("The HttpChannel was closed without sending the response"); + span.setError(ex); + span.endSpan(); + } + @Override public void close() { delegate.close(); diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java b/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java index abddfcc6cebc1..538bf82a1dbec 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java @@ -101,6 +101,7 @@ public void handleRejection(Exception exp) { try (SpanScope scope = tracer.withSpanInScope(span)) { delegate.handleRejection(exp); } finally { + span.setError(exp); span.endSpan(); } } diff --git a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java index 90e94e52515ce..8992af18edb48 100644 --- a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java +++ b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java @@ -57,7 +57,7 @@ public interface TransportResponseHandler<T extends TransportResponse> extends W * It should be used to clean up the resources held by the {@link TransportResponseHandler}. * @param exp exception */ - default void handleRejection(Exception exp) {}; + default void handleRejection(Exception exp) {} default <Q extends TransportResponse> TransportResponseHandler<Q> wrap(Function<Q, T> converter, Writeable.Reader<Q> reader) { final TransportResponseHandler<T> self = this;
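
The common thread in PATCH 12/12 is the failure-path contract for tracing spans: whenever a request can die without producing a response (a closed HttpChannel, a rejected transport handler), the span must record the error and be ended explicitly, or the trace never terminates. A minimal sketch of that contract follows; the `Span` interface is a simplified stand-in for OpenSearch's `org.opensearch.telemetry.tracing.Span`, modeling only the three calls visible in the hunks (`addEvent`, `setError`, `endSpan`), and `failSpan` is a hypothetical helper, not part of the patch.

```java
// Simplified stand-in for org.opensearch.telemetry.tracing.Span; only the
// calls that appear in the patch are modeled here.
interface Span {
    void addEvent(String event);
    void setError(Exception e);
    void endSpan();
}

final class TraceTermination {
    // Mirrors the failure paths added in TraceableHttpChannel.handleException and
    // TraceableTransportResponseHandler.handleRejection: mark the span failed,
    // then end it, so the trace terminates even though no response was sent.
    static void failSpan(Span span, Exception ex, String reason) {
        span.addEvent(reason); // e.g. "The HttpChannel was closed without sending the response"
        span.setError(ex);     // attach the causing exception to the span
        span.endSpan();        // always end the span on the failure path
    }
}
```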