From 71f6136db5f6fe8661a6dd5ec46ed7c6336ba63b Mon Sep 17 00:00:00 2001 From: Laurent Laborde Date: Thu, 14 Sep 2023 18:47:50 +0200 Subject: [PATCH 01/23] Fixing javadoc error (#9976) * Fixing javadoc error fixing javadoc error and warning javadoc for org.opensearch.core.indices.breaker javadoc for org.opensearch.core.index.shard.ShardId javadoc for org.opensearch.core.index.Index fixing a mishap in formatting rule fixing javadoc of org.opensearch.cli.Terminal Signed-off-by: Laurent Laborde * removing unsupported @ImplNote Signed-off-by: Laurent Laborde * Update libs/common/src/main/java/org/opensearch/common/collect/Iterators.java Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde * Update libs/compress/src/main/java/org/opensearch/compress/ZstdCompressor.java Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde * Apply suggestions from code review Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde * Apply suggestions from code review Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde * Apply suggestions from code review Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde * Apply suggestions from code review Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde * Update CollectionUtils.java Signed-off-by: Laurent Laborde * fixing javadoc warnings Signed-off-by: Laurent Laborde * remove useless p tag Signed-off-by: Laurent Laborde * removing a p tag Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde * proper capitalization Co-authored-by: Andriy Redko Signed-off-by: Laurent Laborde --------- Signed-off-by: Laurent Laborde Co-authored-by: Andriy Redko --- .../opensearch/common/collect/Iterators.java | 19 ++++++ .../opensearch/compress/ZstdCompressor.java | 51 ++++++++++++-- .../core/common/breaker/CircuitBreaker.java | 28 +++++--- .../breaker/CircuitBreakingException.java | 4 ++ .../common/breaker/NoopCircuitBreaker.java | 65 ++++++++++++++++-- .../common/bytes/AbstractBytesReference.java | 3 +- .../common/logging/LoggerMessageFormat.java | 29 ++++++++ .../common/transport/TransportAddress.java | 12 +++- .../core/common/unit/ByteSizeUnit.java | 7 ++ .../core/common/util/CollectionUtils.java | 49 ++++++++++++-- .../java/org/opensearch/core/index/Index.java | 52 +++++++++++++-- .../opensearch/core/index/shard/ShardId.java | 60 +++++++++++++++++ .../breaker/AllCircuitBreakerStats.java | 26 ++++++++ .../indices/breaker/CircuitBreakerStats.java | 56 +++++++++++++++- .../breaker/NoneCircuitBreakerService.java | 15 ++++- .../java/org/opensearch/geometry/Circle.java | 66 +++++++++++++++++++ 16 files changed, 512 insertions(+), 30 deletions(-) diff --git a/libs/common/src/main/java/org/opensearch/common/collect/Iterators.java b/libs/common/src/main/java/org/opensearch/common/collect/Iterators.java index c7e7ae6a44a21..9b64932356c10 100644 --- a/libs/common/src/main/java/org/opensearch/common/collect/Iterators.java +++ b/libs/common/src/main/java/org/opensearch/common/collect/Iterators.java @@ -41,6 +41,15 @@ * @opensearch.internal */ public class Iterators { + + /** + * Concat iterators + * + * @param iterators the iterators to concat + * @param the type of iterator + * @return a new {@link ConcatenatedIterator} + * @throws NullPointerException if iterators is null + */ public static Iterator concat(Iterator... 
iterators) { if (iterators == null) { throw new NullPointerException("iterators"); @@ -71,6 +80,11 @@ static class ConcatenatedIterator implements Iterator { this.iterators = iterators; } + /** + * Returns {@code true} if the iteration has more elements. (In other words, returns {@code true} if {@link #next} would return an + * element rather than throwing an exception.) + * @return {@code true} if the iteration has more elements + */ @Override public boolean hasNext() { boolean hasNext = false; @@ -81,6 +95,11 @@ public boolean hasNext() { return hasNext; } + /** + * Returns the next element in the iteration. + * @return the next element in the iteration + * @throws NoSuchElementException if the iteration has no more elements + */ @Override public T next() { if (!hasNext()) { diff --git a/libs/compress/src/main/java/org/opensearch/compress/ZstdCompressor.java b/libs/compress/src/main/java/org/opensearch/compress/ZstdCompressor.java index 01afc368fb120..e2a740f72be93 100644 --- a/libs/compress/src/main/java/org/opensearch/compress/ZstdCompressor.java +++ b/libs/compress/src/main/java/org/opensearch/compress/ZstdCompressor.java @@ -30,10 +30,13 @@ * @opensearch.experimental - class methods might change */ public class ZstdCompressor implements Compressor { - // An arbitrary header that we use to identify compressed streams - // It needs to be different from other compressors and to not be specific - // enough so that no stream starting with these bytes could be detected as - // a XContent + + /** + * An arbitrary header that we use to identify compressed streams + * It needs to be different from other compressors and to not be specific + * enough so that no stream starting with these bytes could be detected as + * a XContent + * */ private static final byte[] HEADER = new byte[] { 'Z', 'S', 'T', 'D', '\0' }; /** @@ -44,10 +47,20 @@ public class ZstdCompressor implements Compressor { @PublicApi(since = "2.10.0") public static final String NAME = "ZSTD"; + /** + * The compression level for {@link ZstdOutputStreamNoFinalizer} + */ private static final int LEVEL = 3; + /** The buffer size for {@link BufferedInputStream} and {@link BufferedOutputStream} + */ private static final int BUFFER_SIZE = 4096; + /** + * Compares the given bytes with the {@link ZstdCompressor#HEADER} of a compressed stream + * @param bytes the bytes to compare to ({@link ZstdCompressor#HEADER}) + * @return true if the bytes are the {@link ZstdCompressor#HEADER}, false otherwise + */ @Override public boolean isCompressed(BytesReference bytes) { if (bytes.length() < HEADER.length) { @@ -61,11 +74,22 @@ public boolean isCompressed(BytesReference bytes) { return true; } + /** + * Returns the length of the {@link ZstdCompressor#HEADER} + * @return the {@link ZstdCompressor#HEADER} length + */ @Override public int headerLength() { return HEADER.length; } + /** + * Returns a new {@link ZstdInputStreamNoFinalizer} from the given compressed {@link InputStream} + * @param in the compressed {@link InputStream} + * @return a new {@link ZstdInputStreamNoFinalizer} from the given compressed {@link InputStream} + * @throws IOException if an I/O error occurs + * @throws IllegalArgumentException if the input stream is not compressed with ZSTD + */ @Override public InputStream threadLocalInputStream(InputStream in) throws IOException { final byte[] header = in.readNBytes(HEADER.length); @@ -75,17 +99,36 @@ public InputStream threadLocalInputStream(InputStream in) throws IOException { return new ZstdInputStreamNoFinalizer(new 
BufferedInputStream(in, BUFFER_SIZE), RecyclingBufferPool.INSTANCE); } + /** + * Returns a new {@link ZstdOutputStreamNoFinalizer} from the given {@link OutputStream} + * @param out the {@link OutputStream} + * @return a new {@link ZstdOutputStreamNoFinalizer} from the given {@link OutputStream} + * @throws IOException if an I/O error occurs + */ @Override public OutputStream threadLocalOutputStream(OutputStream out) throws IOException { out.write(HEADER); return new ZstdOutputStreamNoFinalizer(new BufferedOutputStream(out, BUFFER_SIZE), RecyclingBufferPool.INSTANCE, LEVEL); } + /** + * Always throws an {@link UnsupportedOperationException} as ZSTD compression is supported only for snapshotting + * @param bytesReference a reference to the bytes to uncompress + * @return always throws an exception + * @throws UnsupportedOperationException if the method is called + * @throws IOException is never thrown + */ @Override public BytesReference uncompress(BytesReference bytesReference) throws IOException { throw new UnsupportedOperationException("ZSTD compression is supported only for snapshotting"); } + /** + * Always throws an {@link UnsupportedOperationException} as ZSTD compression is supported only for snapshotting + * @param bytesReference a reference to the bytes to compress + * @return always throws an exception + * @throws UnsupportedOperationException if the method is called + */ @Override public BytesReference compress(BytesReference bytesReference) throws IOException { throw new UnsupportedOperationException("ZSTD compression is supported only for snapshotting"); diff --git a/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java b/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java index 0f75f763d21c1..846950ff17c63 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java +++ b/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java @@ -71,17 +71,23 @@ public interface CircuitBreaker { /** * The type of breaker - * + * can be {@link #MEMORY}, {@link #PARENT}, or {@link #NOOP} * @opensearch.internal */ enum Type { - // A regular or ChildMemoryCircuitBreaker + /** A regular or ChildMemoryCircuitBreaker */ MEMORY, - // A special parent-type for the hierarchy breaker service + /** A special parent-type for the hierarchy breaker service */ PARENT, - // A breaker where every action is a noop, it never breaks + /** A breaker where every action is a noop, it never breaks */ NOOP; + /** + * Converts string (case-insensitive) to breaker {@link Type} + * @param value "noop", "parent", or "memory" (case-insensitive) + * @return the breaker {@link Type} + * @throws IllegalArgumentException if value is not "noop", "parent", or "memory" + */ public static Type parseValue(String value) { switch (value.toLowerCase(Locale.ROOT)) { case "noop": @@ -98,13 +104,13 @@ public static Type parseValue(String value) { /** * The breaker durability - * + * can be {@link #TRANSIENT} or {@link #PERMANENT} * @opensearch.internal */ enum Durability { - // The condition that tripped the circuit breaker fixes itself eventually. + /** The condition that tripped the circuit breaker fixes itself eventually. */ TRANSIENT, - // The condition that tripped the circuit breaker requires manual intervention. + /** The condition that tripped the circuit breaker requires manual intervention. 
*/ PERMANENT } @@ -120,11 +126,14 @@ enum Durability { * @param bytes number of bytes to add * @param label string label describing the bytes being added * @return the number of "used" bytes for the circuit breaker + * @throws CircuitBreakingException if the breaker tripped */ double addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException; /** * Adjust the circuit breaker without tripping + * @param bytes number of bytes to add + * @return the number of "used" bytes for the circuit breaker */ long addWithoutBreaking(long bytes); @@ -154,7 +163,10 @@ enum Durability { String getName(); /** - * @return whether a tripped circuit breaker will reset itself (transient) or requires manual intervention (permanent). + * Returns the {@link Durability} of this breaker + * @return whether a tripped circuit breaker will + * reset itself ({@link Durability#TRANSIENT}) + * or requires manual intervention ({@link Durability#PERMANENT}). */ Durability getDurability(); diff --git a/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreakingException.java b/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreakingException.java index e6443a0d48ce0..2df116dcad076 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreakingException.java +++ b/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreakingException.java @@ -46,8 +46,11 @@ */ public class CircuitBreakingException extends OpenSearchException { + /** The number of bytes wanted */ private final long bytesWanted; + /** The circuit breaker limit */ private final long byteLimit; + /** The {@link CircuitBreaker.Durability} of the circuit breaker */ private final CircuitBreaker.Durability durability; public CircuitBreakingException(StreamInput in) throws IOException { @@ -88,6 +91,7 @@ public CircuitBreaker.Durability getDurability() { return durability; } + /** Always returns {@link RestStatus#TOO_MANY_REQUESTS} */ @Override public RestStatus status() { return RestStatus.TOO_MANY_REQUESTS; diff --git a/libs/core/src/main/java/org/opensearch/core/common/breaker/NoopCircuitBreaker.java b/libs/core/src/main/java/org/opensearch/core/common/breaker/NoopCircuitBreaker.java index 86a0a7ccb96fd..17b9fefd27c99 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/breaker/NoopCircuitBreaker.java +++ b/libs/core/src/main/java/org/opensearch/core/common/breaker/NoopCircuitBreaker.java @@ -33,65 +33,120 @@ package org.opensearch.core.common.breaker; /** - * A CircuitBreaker that doesn't increment or adjust, and all operations are - * basically noops - * + * A {@link CircuitBreaker} that doesn't increment or adjust, and all operations are + * basically noops. + * It never trips, limit is always -1, always returns 0 for all metrics. 
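+ * <p>
+ * A minimal usage sketch (the breaker name and label here are illustrative; the constructor and return values are documented below):
+ * <pre>{@code
+ * CircuitBreaker breaker = new NoopCircuitBreaker("noop");
+ * breaker.addEstimateBytesAndMaybeBreak(1024, "label"); // returns 0, never trips
+ * }</pre>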
* @opensearch.internal */ public class NoopCircuitBreaker implements CircuitBreaker { - public static final int LIMIT = -1; + /** The limit of this breaker is always -1 */ + public static final int LIMIT = -1; + /** Name of this breaker */ private final String name; + /** + * Creates a new NoopCircuitBreaker (that never trips) with the given name + * @param name the name of this breaker + */ public NoopCircuitBreaker(String name) { this.name = name; } + /** + * This is a noop; a noop breaker never trips + * @param fieldName name of this noop breaker + * @param bytesNeeded bytes needed + */ @Override public void circuitBreak(String fieldName, long bytesNeeded) { // noop } + /** + * This is a noop; it always returns 0 and never throws or trips + * @param bytes number of bytes to add + * @param label string label describing the bytes being added + * @return always returns 0 + * @throws CircuitBreakingException never thrown + */ @Override public double addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException { return 0; } + /** + * This is a noop; nothing is added and it always returns 0 + * @param bytes number of bytes to add (ignored) + * @return always returns 0 + */ @Override public long addWithoutBreaking(long bytes) { return 0; } + /** + * This is a noop; it always returns 0 + * @return always returns 0 + */ @Override public long getUsed() { return 0; } + /** + * A noop breaker has a constant limit of -1 + * @return always returns -1 + */ @Override public long getLimit() { return LIMIT; } + /** + * A noop breaker has no overhead; this always returns 0 + * @return always returns 0 + */ @Override public double getOverhead() { return 0; } + /** + * A noop breaker never trips; this always returns 0 + * @return always returns 0 + */ @Override public long getTrippedCount() { return 0; } + /** + * Returns the name of this breaker + * @return the name of this breaker + */ @Override public String getName() { return this.name; } + /** + * A noop breaker's {@link Durability} is always {@link Durability#PERMANENT} + * @return always returns {@link Durability#PERMANENT} + */ @Override public Durability getDurability() { return Durability.PERMANENT; } + /** + * Limit and overhead are constant for a noop breaker, + * so this method is a noop.
+ * @param limit the desired limit (ignored) + * @param overhead the desired overhead (ignored) + */ @Override - public void setLimitAndOverhead(long limit, double overhead) {} + public void setLimitAndOverhead(long limit, double overhead) { + // noop + } } diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java index e054776d67fdc..8c1efcd00c24e 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java @@ -49,7 +49,8 @@ */ public abstract class AbstractBytesReference implements BytesReference { - private Integer hash = null; // we cache the hash of this reference since it can be quite costly to re-calculated it + /** we cache the hash of this reference since it can be quite costly to re-calculated it */ + private Integer hash = null; private static final int MAX_UTF16_LENGTH = Integer.MAX_VALUE >> 1; @Override diff --git a/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java b/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java index 59492193d16dc..cd75bddd680e5 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java +++ b/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java @@ -37,6 +37,10 @@ /** * Format string for OpenSearch log messages. + *
+ * This class is almost a copy of {@code org.slf4j.helpers.MessageFormatter}
+ * The original code is licensed under the MIT License and is available at : + * MessageFormatter.java * * @opensearch.internal */ @@ -51,6 +55,17 @@ public static String format(final String messagePattern, final Object... argArra return format(null, messagePattern, argArray); } + /** + * (this is almost a copy of {@code org.slf4j.helpers.MessageFormatter.arrayFormat}) + * + * @param prefix the prefix to prepend to the formatted message (can be null) + * @param messagePattern the message pattern which will be parsed and formatted + * @param argArray an array of arguments to be substituted in place of formatting anchors + * @return null if messagePattern is null
+ * messagePattern if argArray is (null or empty) and prefix is null
+ * prefix + messagePattern if argArray is (null or empty) and prefix is not null
+ * formatted message otherwise (even if prefix is null) + */ public static String format(final String prefix, final String messagePattern, final Object... argArray) { if (messagePattern == null) { return null; } @@ -110,6 +125,13 @@ public static String format(final String prefix, final String messagePattern, fi return sbuf.toString(); } + /** + * Checks if (delimiterStartIndex - 1) in messagePattern is an escape character. + * @param messagePattern the message pattern + * @param delimiterStartIndex the index of the character to check + * @return true if there is an escape char before the character at delimiterStartIndex.
+ * Always returns false if delimiterStartIndex == 0 (edge case) + */ static boolean isEscapedDelimiter(String messagePattern, int delimiterStartIndex) { if (delimiterStartIndex == 0) { @@ -119,6 +141,13 @@ static boolean isEscapedDelimiter(String messagePattern, int delimiterStartIndex return potentialEscape == ESCAPE_CHAR; } + /** + * Checks if (delimiterStartIndex - 2) in messagePattern is an escape character. + * @param messagePattern the message pattern + * @param delimiterStartIndex the index of the character to check + * @return true if (delimiterStartIndex - 2) in messagePattern is an escape character. + * Always returns false if delimiterStartIndex is less than 2 (edge case) + */ static boolean isDoubleEscaped(String messagePattern, int delimiterStartIndex) { return delimiterStartIndex >= 2 && messagePattern.charAt(delimiterStartIndex - 2) == ESCAPE_CHAR; } diff --git a/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java b/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java index 1a853877ed0b9..551504ed6f719 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java +++ b/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java @@ -71,6 +71,12 @@ public TransportAddress(InetAddress address, int port) { this(new InetSocketAddress(address, port)); } + /** + * Creates a new {@link TransportAddress} from an {@link InetSocketAddress}. + * @param address the address to wrap + * @throws IllegalArgumentException if the address is null or not resolved + * @see InetSocketAddress#getAddress() + */ public TransportAddress(InetSocketAddress address) { if (address == null) { throw new IllegalArgumentException("InetSocketAddress must not be null"); @@ -82,7 +88,9 @@ public TransportAddress(InetSocketAddress address) { } /** - * Read from a stream. + * Creates a new {@link TransportAddress} from a {@link StreamInput}. + * @param in the stream to read from + * @throws IOException if an I/O error occurs */ public TransportAddress(StreamInput in) throws IOException { final int len = in.readByte(); @@ -116,6 +124,8 @@ public String getAddress() { /** * Returns the addresses port + * @return the port number, or 0 if the socket is not bound yet. + * @see InetSocketAddress#getPort() */ public int getPort() { return address.getPort(); } diff --git a/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java b/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java index 1f49a3531986c..c15db75d06d49 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java +++ b/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java @@ -46,6 +46,13 @@ * helps organize and use size representations that may be maintained * separately across various contexts. * + * It uses conventional data storage values (base-2): + *
+ * <ul>
+ * <li>1KB = 1024 bytes</li>
+ * <li>1MB = 1024KB</li>
+ * <li>...</li>
+ * </ul>
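+ * For example (a sketch; the {@code toBytes} conversion helpers are assumed from this enum's unit constants):
+ * <pre>{@code
+ * ByteSizeUnit.KB.toBytes(1); // 1024
+ * ByteSizeUnit.MB.toBytes(1); // 1048576 (1024 * 1024)
+ * }</pre>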
+ * * @opensearch.api */ @PublicApi(since = "1.0.0") diff --git a/libs/core/src/main/java/org/opensearch/core/common/util/CollectionUtils.java b/libs/core/src/main/java/org/opensearch/core/common/util/CollectionUtils.java index e8dd31fcf1869..5335c98182b64 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/util/CollectionUtils.java +++ b/libs/core/src/main/java/org/opensearch/core/common/util/CollectionUtils.java @@ -73,6 +73,16 @@ public static boolean isEmpty(Object[] array) { /** * Return a rotated view of the given list with the given distance. + *
+ * <ul>
+ * <li>The distance can be negative, in which case the list is rotated to the left.</li>
+ * <li>The distance can be larger than the size of the list, in which case the list is rotated multiple times.</li>
+ * <li>The distance can be zero, in which case the list is not rotated.</li>
+ * <li>The list can be empty, in which case it remains empty.</li>
+ * </ul>
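+ * For example (a sketch based only on the properties listed above):
+ * <pre>{@code
+ * List<Integer> list = Arrays.asList(1, 2, 3);
+ * CollectionUtils.rotate(list, 0); // distance 0: view equals the original list
+ * CollectionUtils.rotate(list, 3); // distance == size: one full turn, same order
+ * }</pre>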
+ * @param list the list to rotate + * @param distance the distance to rotate (positive rotates right, negative rotates left) + * @return a rotated view of the given list with the given distance + * @see RotatedList */ public static List rotate(final List list, int distance) { if (list.isEmpty()) { @@ -92,7 +102,13 @@ public static List rotate(final List list, int distance) { } /** - * in place de-duplicates items in a list + * In place de-duplicates items in a list + * Noop if the list is empty or has one item. + * + * @throws NullPointerException if the list is `null` or comparator is `null` + * @param array the list to de-duplicate + * @param comparator the comparator to use to compare items + * @param the type of the items in the list */ public static void sortAndDedup(final List array, Comparator comparator) { // base case: one item @@ -115,6 +131,12 @@ public static void sortAndDedup(final List array, Comparator comparato array.subList(deduped.nextIndex(), array.size()).clear(); } + /** + * Converts a collection of Integers to an array of ints. + * @param ints The collection of Integers to convert + * @return The array of ints + * @throws NullPointerException if ints is null + */ public static int[] toArray(Collection ints) { Objects.requireNonNull(ints); return ints.stream().mapToInt(s -> s).toArray(); @@ -134,6 +156,11 @@ public static void ensureNoSelfReferences(Object value, String messageHint) { } } + /** + * Converts an object to an Iterable, if possible. + * @param value The object to convert + * @return The Iterable, or null if the object cannot be converted + */ @SuppressWarnings("unchecked") private static Iterable convert(Object value) { if (value == null) { @@ -192,6 +219,13 @@ private static class RotatedList extends AbstractList implements RandomAcc private final List in; private final int distance; + /** + * Creates a rotated list + * @param list The list to rotate + * @param distance The distance to rotate to the right + * @throws IllegalArgumentException if the distance is negative or greater than the size of the list; + * or if the list is not a {@link RandomAccess} list + */ RotatedList(List list, int distance) { if (distance < 0 || distance >= list.size()) { throw new IllegalArgumentException(); @@ -218,6 +252,13 @@ public int size() { } } + /** + * Converts an {@link Iterable} to an {@link ArrayList}. + * @param elements The iterable to convert + * @param the type the elements + * @return an {@link ArrayList} + * @throws NullPointerException if elements is null + */ @SuppressWarnings("unchecked") public static ArrayList iterableAsArrayList(Iterable elements) { if (elements == null) { @@ -297,11 +338,11 @@ public static List> eagerPartition(List list, int size) { } /** - * Check if a collection is empty or not. Empty collection mean either it is null or it has no elements in it. If - * collection contains a null element it means it is not empty. + * Checks if a collection is empty or not. Empty collection mean either it is null or it has no elements in it. + * If collection contains a null element it means it is not empty. 
* * @param collection {@link Collection} - * @return boolean + * @return true if collection is null or {@code isEmpty()}, false otherwise * @param Element */ public static boolean isEmpty(final Collection collection) { diff --git a/libs/core/src/main/java/org/opensearch/core/index/Index.java b/libs/core/src/main/java/org/opensearch/core/index/Index.java index fdff43f3c9139..77cc628213df9 100644 --- a/libs/core/src/main/java/org/opensearch/core/index/Index.java +++ b/libs/core/src/main/java/org/opensearch/core/index/Index.java @@ -49,6 +49,8 @@ /** * A value class representing the basic required properties of an OpenSearch index. * + * (This class is immutable.) + * * @opensearch.api */ @PublicApi(since = "1.0.0") @@ -57,6 +59,7 @@ public class Index implements Writeable, ToXContentObject { public static final Index[] EMPTY_ARRAY = new Index[0]; private static final String INDEX_UUID_KEY = "index_uuid"; private static final String INDEX_NAME_KEY = "index_name"; + private static final ObjectParser INDEX_PARSER = new ObjectParser<>("index", Builder::new); static { INDEX_PARSER.declareString(Builder::name, new ParseField(INDEX_NAME_KEY)); @@ -66,39 +69,74 @@ public class Index implements Writeable, ToXContentObject { private final String name; private final String uuid; + /** + * Creates a new Index instance with name and unique identifier + * + * @param name the name of the index + * @param uuid the unique identifier of the index + * @throws NullPointerException if either name or uuid are null + */ public Index(String name, String uuid) { this.name = Objects.requireNonNull(name); this.uuid = Objects.requireNonNull(uuid); } /** - * Read from a stream. + * Creates a new Index instance from a {@link StreamInput}. + * Reads the name and unique identifier from the stream. + * + * @param in the stream to read from + * @throws IOException if an error occurs while reading from the stream + * @see #writeTo(StreamOutput) */ public Index(StreamInput in) throws IOException { this.name = in.readString(); this.uuid = in.readString(); } + /** + * Gets the name of the index. + * + * @return the name of the index. + */ public String getName() { return this.name; } + /** + * Gets the unique identifier of the index. + * + * @return the unique identifier of the index. "_na_" if {@link Strings#UNKNOWN_UUID_VALUE}. + */ public String getUUID() { return uuid; } + /** + * Returns either the name and unique identifier of the index + * or only the name if the uuid is {@link Strings#UNKNOWN_UUID_VALUE}. + * + * If we have a uuid we put it in the toString so it'll show up in logs + * which is useful as more and more things use the uuid rather + * than the name as the lookup key for the index. + * + * @return {@code "[name/uuid]"} or {@code "[name]"} + */ @Override public String toString() { - /* - * If we have a uuid we put it in the toString so it'll show up in logs which is useful as more and more things use the uuid rather - * than the name as the lookup key for the index. - */ if (Strings.UNKNOWN_UUID_VALUE.equals(uuid)) { return "[" + name + "]"; } return "[" + name + "/" + uuid + "]"; } + /** + * Checks if this index is the same as another index by comparing the name and unique identifier. + * If both uuid are {@link Strings#UNKNOWN_UUID_VALUE} then only the name is compared. + * + * @param o the index to compare to + * @return true if the name and unique identifier are the same, false otherwise. 
+ */ @Override public boolean equals(Object o) { if (this == o) { @@ -118,6 +156,10 @@ public int hashCode() { return result; } + /** Writes the name and unique identifier to the {@link StreamOutput} + * + * @param out The stream to write to + */ @Override public void writeTo(final StreamOutput out) throws IOException { out.writeString(name); diff --git a/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java b/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java index adea6cd8f0687..984434190b486 100644 --- a/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java +++ b/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java @@ -55,44 +55,87 @@ public class ShardId implements Comparable, ToXContentFragment, Writeab private final int shardId; private final int hashCode; + /** + * Constructs a new shard id. + * @param index the index name + * @param shardId the shard id + */ public ShardId(Index index, int shardId) { this.index = index; this.shardId = shardId; this.hashCode = computeHashCode(); } + /** + * Constructs a new shard id with the given index name, index unique identifier, and shard id. + * @param index the index name + * @param indexUUID the index unique identifier + * @param shardId the shard id + */ public ShardId(String index, String indexUUID, int shardId) { this(new Index(index, indexUUID), shardId); } + /** + * Constructs a new shardId from a stream. + * @param in the stream to read from + * @throws IOException if an error occurs while reading from the stream + * @see #writeTo(StreamOutput) + */ public ShardId(StreamInput in) throws IOException { index = new Index(in); shardId = in.readVInt(); hashCode = computeHashCode(); } + /** + * Writes this shard id to a stream. + * @param out the stream to write to + * @throws IOException if an error occurs while writing to the stream + */ @Override public void writeTo(StreamOutput out) throws IOException { index.writeTo(out); out.writeVInt(shardId); } + /** + * Returns the index of this shard id. + * @return the index of this shard id + */ public Index getIndex() { return index; } + /** + * Returns the name of the index of this shard id. + * @return the name of the index of this shard id + */ public String getIndexName() { return index.getName(); } + /** + * Return the shardId of this shard id. + * @return the shardId of this shard id + * @see #getId() + */ public int id() { return this.shardId; } + /** + * Returns the shard id of this shard id. + * @return the shard id of this shard id + */ public int getId() { return id(); } + /** + * Returns a string representation of this shard id. + * @return "[indexName][shardId]" + */ @Override public String toString() { return "[" + index.getName() + "][" + shardId + "]"; @@ -100,9 +143,13 @@ public String toString() { /** * Parse the string representation of this shardId back to an object. + * * We lose index uuid information here, but since we use toString in * rest responses, this is the best we can do to reconstruct the object * on the client side. + * + * @param shardIdString the string representation of the shard id + * (Expect a string of format "[indexName][shardId]" (square brackets included)) */ public static ShardId fromString(String shardIdString) { int splitPosition = shardIdString.indexOf("]["); @@ -122,17 +169,30 @@ public boolean equals(Object o) { return shardId == shardId1.shardId && index.equals(shardId1.index); } + /** Returns the hash code of this shard id. 
+ * + * @return the hash code of this shard id + */ @Override public int hashCode() { return hashCode; } + /** Computes the hash code of this shard id. + * + * @return the hash code of this shard id. + */ private int computeHashCode() { int result = index != null ? index.hashCode() : 0; result = 31 * result + shardId; return result; } + /** + * Compares this ShardId with the specified ShardId. + * @param o the ShardId to be compared. + * @return a negative integer, zero, or a positive integer if this ShardId is less than, equal to, or greater than the specified ShardId + */ @Override public int compareTo(ShardId o) { if (o.getId() == shardId) { diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java index ab887acb85a87..3ce8b4953b9d6 100644 --- a/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java @@ -47,25 +47,51 @@ */ public class AllCircuitBreakerStats implements Writeable, ToXContentFragment { + /** An array of all the circuit breaker stats */ private final CircuitBreakerStats[] allStats; + /** + * Constructs the instance + * + * @param allStats an array of all the circuit breaker stats + */ public AllCircuitBreakerStats(CircuitBreakerStats[] allStats) { this.allStats = allStats; } + /** + * Constructs the new instance from {@link StreamInput} + * @param in the {@link StreamInput} to read from + * @throws IOException If an error occurs while reading from the StreamInput + * @see #writeTo(StreamOutput) + */ public AllCircuitBreakerStats(StreamInput in) throws IOException { allStats = in.readArray(CircuitBreakerStats::new, CircuitBreakerStats[]::new); } + /** + * Writes this instance into a {@link StreamOutput} + * @param out the {@link StreamOutput} to write to + * @throws IOException if an error occurs while writing to the StreamOutput + */ @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(allStats); } + /** + * Returns inner stats instances for all circuit breakers + * @return inner stats instances for all circuit breakers + */ public CircuitBreakerStats[] getAllStats() { return this.allStats; } + /** + * Returns the stats for a specific circuit breaker + * @param name the name of the circuit breaker + * @return the {@link CircuitBreakerStats} for the circuit breaker, null if the circuit breaker with such name does not exist + */ public CircuitBreakerStats getStats(String name) { for (CircuitBreakerStats stats : allStats) { if (stats.getName().equals(name)) { diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java index 0e53a38908a96..9207d3ea77227 100644 --- a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java @@ -43,18 +43,33 @@ import java.util.Locale; /** - * Class encapsulating stats about the circuit breaker + * Class encapsulating stats about the {@link org.opensearch.core.common.breaker.CircuitBreaker} * * @opensearch.internal */ public class CircuitBreakerStats implements Writeable, ToXContentObject { + /** The name of the circuit breaker */ private final String name; + /** The limit size in byte of the circuit breaker. 
Field : "limit_size_in_bytes" */ private final long limit; + /** The estimated size in byte of the breaker. Field : "estimated_size_in_bytes" */ private final long estimated; + /** The number of times the breaker has been tripped. Field : "tripped" */ private final long trippedCount; + /** The overhead of the breaker. Field : "overhead" */ private final double overhead; + /** + * Constructs new instance + * + * @param name The name of the circuit breaker + * @param limit The limit size in byte of the circuit breaker + * @param estimated The estimated size in byte of the breaker + * @param overhead The overhead of the breaker + * @param trippedCount The number of times the breaker has been tripped + * @see org.opensearch.core.common.breaker.CircuitBreaker + */ public CircuitBreakerStats(String name, long limit, long estimated, double overhead, long trippedCount) { this.name = name; this.limit = limit; @@ -63,6 +78,14 @@ public CircuitBreakerStats(String name, long limit, long estimated, double overh this.overhead = overhead; } + /** + * Constructs new instance from the {@link StreamInput} + * + * @param in The StreamInput + * @throws IOException if an error occurs while reading from the StreamInput + * @see org.opensearch.core.common.breaker.CircuitBreaker + * @see #writeTo(StreamOutput) + */ public CircuitBreakerStats(StreamInput in) throws IOException { this.limit = in.readLong(); this.estimated = in.readLong(); @@ -71,6 +94,13 @@ public CircuitBreakerStats(StreamInput in) throws IOException { this.name = in.readString(); } + /** + * Writes this instance into a {@link StreamOutput} + * + * @param out The StreamOutput + * @throws IOException if an error occurs while writing to the StreamOutput + * @see #CircuitBreakerStats(StreamInput) + */ @Override public void writeTo(StreamOutput out) throws IOException { out.writeLong(limit); @@ -80,22 +110,42 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); } + /** + * Returns the name of the circuit breaker + * @return The name of the circuit breaker + */ public String getName() { return this.name; } + /** + * Returns the limit size in byte of the circuit breaker + * @return The limit size in byte of the circuit breaker + */ public long getLimit() { return this.limit; } + /** + * Returns the estimated size in byte of the breaker + * @return The estimated size in byte of the breaker + */ public long getEstimated() { return this.estimated; } + /** + * Returns the number of times the breaker has been tripped + * @return The number of times the breaker has been tripped + */ public long getTrippedCount() { return this.trippedCount; } + /** + * Returns the overhead of the breaker + * @return The overhead of the breaker + */ public double getOverhead() { return this.overhead; } @@ -113,6 +163,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + /** + * Returns a String representation of this CircuitBreakerStats + * @return "[name,limit=limit/limit_human,estimated=estimated/estimated_human,overhead=overhead,tripped=trippedCount]" + */ @Override public String toString() { return "[" diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/NoneCircuitBreakerService.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/NoneCircuitBreakerService.java index 4095fd32b6d3c..49c5a393328b9 100644 --- a/libs/core/src/main/java/org/opensearch/core/indices/breaker/NoneCircuitBreakerService.java +++ 
b/libs/core/src/main/java/org/opensearch/core/indices/breaker/NoneCircuitBreakerService.java @@ -36,8 +36,9 @@ import org.opensearch.core.common.breaker.NoopCircuitBreaker; /** - * Class that returns a breaker that never breaks + * Class that returns a breaker that uses the NoopCircuitBreaker and never breaks * + * @see org.opensearch.core.common.breaker.NoopCircuitBreaker * @opensearch.internal */ public class NoneCircuitBreakerService extends CircuitBreakerService { @@ -48,6 +49,12 @@ public NoneCircuitBreakerService() { super(); } + /** + * Returns a breaker that uses the NoopCircuitBreaker and never breaks + * + * @param name name of the breaker (ignored) + * @return a NoopCircuitBreaker + */ @Override public CircuitBreaker getBreaker(String name) { return breaker; } @@ -58,6 +65,12 @@ public AllCircuitBreakerStats stats() { return new AllCircuitBreakerStats(new CircuitBreakerStats[] { stats(CircuitBreaker.FIELDDATA) }); } + /** + * Always returns the same stats, as a NoopCircuitBreaker never breaks and all operations are noops. + * + * @param name name of the breaker (ignored) + * @return always "fielddata", limit: -1, estimated: -1, overhead: 0, trippedCount: 0 + */ @Override public CircuitBreakerStats stats(String name) { return new CircuitBreakerStats(CircuitBreaker.FIELDDATA, -1, -1, 0, 0); diff --git a/libs/geo/src/main/java/org/opensearch/geometry/Circle.java b/libs/geo/src/main/java/org/opensearch/geometry/Circle.java index 6f8b0dc6929cc..cf1dce8966e1f 100644 --- a/libs/geo/src/main/java/org/opensearch/geometry/Circle.java +++ b/libs/geo/src/main/java/org/opensearch/geometry/Circle.java @@ -39,12 +39,19 @@ * and optional altitude in meters. */ public class Circle implements Geometry { + + /** Empty circle: x=0, y=0, z=NaN, radius=-1 */ public static final Circle EMPTY = new Circle(); + /** Latitude of the center of the circle in degrees */ private final double y; + /** Longitude of the center of the circle in degrees */ private final double x; + /** Altitude of the center of the circle in meters (NaN if irrelevant) */ private final double z; + /** Radius of the circle in meters */ private final double radiusMeters; + /** Create an {@link #EMPTY} circle */ private Circle() { y = 0; x = 0; @@ -52,10 +59,23 @@ private Circle() { radiusMeters = -1; } + /** + * Create a circle with no altitude. + * @param x Longitude of the center of the circle in degrees + * @param y Latitude of the center of the circle in degrees + * @param radiusMeters Radius of the circle in meters + */ public Circle(final double x, final double y, final double radiusMeters) { this(x, y, Double.NaN, radiusMeters); } + /** + * Create a circle with altitude.
+ * @param x Longitude of the center of the circle in degrees + * @param y Latitude of the center of the circle in degrees + * @param z Altitude of the center of the circle in meters + * @param radiusMeters Radius of the circle in meters + */ public Circle(final double x, final double y, final double z, final double radiusMeters) { this.y = y; this.x = x; @@ -66,39 +86,68 @@ public Circle(final double x, final double y, final double z, final double radiu } } + /** + * @return The type of this geometry (always {@link ShapeType#CIRCLE}) + */ @Override public ShapeType type() { return ShapeType.CIRCLE; } + /** + * @return The y (latitude) of the center of the circle in degrees + */ public double getY() { return y; } + /** + * @return The x (longitude) of the center of the circle in degrees + */ public double getX() { return x; } + /** + * @return The radius of the circle in meters + */ public double getRadiusMeters() { return radiusMeters; } + /** + * @return The altitude of the center of the circle in meters (NaN if irrelevant) + */ public double getZ() { return z; } + /** + * @return The latitude (y) of the center of the circle in degrees + */ public double getLat() { return y; } + /** + * @return The longitude (x) of the center of the circle in degrees + */ public double getLon() { return x; } + /** + * @return The altitude (z) of the center of the circle in meters (NaN if irrelevant) + */ public double getAlt() { return z; } + /** + * Compare this circle to another circle. + * @param o The other circle + * @return True if the two circles are equal in all their properties. False if null or different. + */ @Override public boolean equals(Object o) { if (this == o) return true; @@ -111,6 +160,9 @@ public boolean equals(Object o) { return (Double.compare(circle.z, z) == 0); } + /** + * @return The hashcode of this circle. + */ @Override public int hashCode() { int result; @@ -126,11 +178,22 @@ public int hashCode() { return result; } + /** + * Visit this circle with a {@link GeometryVisitor}. + * @param visitor The visitor + * @param The return type of the visitor + * @param The exception type of the visitor + * @return The result of the visitor + * @throws E The exception thrown by the visitor + */ @Override public T visit(GeometryVisitor visitor) throws E { return visitor.visit(this); } + /** + * @return True if this circle is empty (radius less than 0) + */ @Override public boolean isEmpty() { return radiusMeters < 0; @@ -141,6 +204,9 @@ public String toString() { return WellKnownText.INSTANCE.toWKT(this); } + /** + * @return True if this circle has an altitude. False if NaN. 
+ */ @Override public boolean hasZ() { return Double.isNaN(z) == false; From e6dec2906f07753cc93030551694864aef4e3f93 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Thu, 14 Sep 2023 15:46:58 -0700 Subject: [PATCH 02/23] Disable stalled PR workflow (#10052) Signed-off-by: Kunal Kotwani --- .github/workflows/stalled.yml | 29 ----------------------------- 1 file changed, 29 deletions(-) delete mode 100644 .github/workflows/stalled.yml diff --git a/.github/workflows/stalled.yml b/.github/workflows/stalled.yml deleted file mode 100644 index bc0a98fff511e..0000000000000 --- a/.github/workflows/stalled.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: Close Stalled PRs -on: - schedule: - - cron: '15 15 * * *' # Run every day at 15:15 UTC / 7:15 PST / 8:15 PDT -permissions: - pull-requests: write -jobs: - stale: - if: github.repository == 'opensearch-project/OpenSearch' - runs-on: ubuntu-latest - steps: - - name: GitHub App token - id: github_app_token - uses: tibdex/github-app-token@v1.5.0 - with: - app_id: ${{ secrets.APP_ID }} - private_key: ${{ secrets.APP_PRIVATE_KEY }} - installation_id: 22958780 - - name: Stale PRs - uses: actions/stale@v8 - with: - repo-token: ${{ steps.github_app_token.outputs.token }} - stale-pr-label: 'stalled' - stale-pr-message: 'This PR is stalled because it has been open for 30 days with no activity. Remove stalled label or comment or this will be closed in 7 days.' - close-pr-message: 'This PR was closed because it has been stalled for 7 days with no activity.' - days-before-pr-stale: 30 - days-before-pr-close: 7 - days-before-issue-stale: -1 - days-before-issue-close: -1 From d77fb2489dfe2202a6bf8f0c82c8a5cf453c8262 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Thu, 14 Sep 2023 16:28:01 -0700 Subject: [PATCH 03/23] Add release notes for 1.3.13 (#10058) Signed-off-by: Kunal Kotwani --- release-notes/opensearch.release-notes-1.3.13.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 release-notes/opensearch.release-notes-1.3.13.md diff --git a/release-notes/opensearch.release-notes-1.3.13.md b/release-notes/opensearch.release-notes-1.3.13.md new file mode 100644 index 0000000000000..3ece2c8f91984 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.13.md @@ -0,0 +1,7 @@ +## 2023-09-14 Version 1.3.13 Release Notes + +### Upgrades +- Bump `netty` from 4.1.96.Final to 4.1.97.Final ([#9553](https://github.com/opensearch-project/OpenSearch/pull/9553)) +- Bump `org.xerial.snappy:snappy-java` from 1.1.8.2 to 1.1.10.3 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) +- Bump `com.squareup.okhttp3:okhttp` from 4.9.3 to 4.11.0 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) +- Bump `com.squareup.okio:okio` from 2.8.0 to 3.5.0 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) From 7d1fa7d11d32bf8b03f9f8641ed7f147b6eb2e34 Mon Sep 17 00:00:00 2001 From: Harish Bhakuni Date: Thu, 14 Sep 2023 18:14:42 -0700 Subject: [PATCH 04/23] Parameterize ITs to run with concurrent search enabled (#9441) * Parameterize ITs to run with concurrent search enabled Signed-off-by: Harish Bhakuni * Address PR Changes Signed-off-by: Harish Bhakuni * Address PR Comments Signed-off-by: Harish Bhakuni --------- Signed-off-by: Harish Bhakuni Co-authored-by: Harish Bhakuni --- .../indices/IndicesRequestCacheIT.java | 25 ++++++++++++++-- .../indices/stats/IndexStatsIT.java | 24 +++++++++++++-- .../org/opensearch/script/ScriptCacheIT.java | 25 ++++++++++++++-- .../search/StressSearchServiceReaperIT.java | 26 
++++++++++++++-- .../opensearch/search/pit/PitMultiNodeIT.java | 25 +++++++++++++++- .../scriptfilter/ScriptQuerySearchIT.java | 23 +++++++++++++- .../search/scroll/DuelScrollIT.java | 25 ++++++++++++++-- .../search/scroll/SearchScrollIT.java | 27 +++++++++++++++-- .../SearchScrollWithFailingNodesIT.java | 30 ++++++++++++++++--- .../search/searchafter/SearchAfterIT.java | 27 +++++++++++++++-- .../search/sort/GeoDistanceSortBuilderIT.java | 25 ++++++++++++++-- .../search/sort/SortFromPluginIT.java | 25 ++++++++++++++-- .../suggest/CompletionSuggestSearchIT.java | 24 +++++++++++++-- .../ContextCompletionSuggestSearchIT.java | 24 +++++++++++++-- .../search/suggest/SuggestSearchIT.java | 24 +++++++++++++-- .../opensearch/similarity/SimilarityIT.java | 28 +++++++++++++++-- 16 files changed, 375 insertions(+), 32 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 12fee85288bc2..98a22717019cf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -32,6 +32,8 @@ package org.opensearch.indices; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.opensearch.action.search.SearchResponse; @@ -40,13 +42,14 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.cache.request.RequestCacheStats; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.time.ZoneId; @@ -54,8 +57,10 @@ import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import java.util.Arrays; +import java.util.Collection; import java.util.List; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.dateRange; import static org.opensearch.search.aggregations.AggregationBuilders.filter; @@ -64,7 +69,23 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -public class IndicesRequestCacheIT extends OpenSearchIntegTestCase { +public class IndicesRequestCacheIT extends ParameterizedOpenSearchIntegTestCase { + public IndicesRequestCacheIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + 
); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } // One of the primary purposes of the query cache is to cache aggs results public void testCacheAggs() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java index ee904dbcb6924..a0f01acd1f8e9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java @@ -32,6 +32,8 @@ package org.opensearch.indices.stats; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -55,6 +57,7 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamOutput; @@ -80,9 +83,9 @@ import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -105,6 +108,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @@ -118,7 +122,23 @@ @ClusterScope(scope = Scope.SUITE, numDataNodes = 2, numClientNodes = 0) @SuppressCodecs("*") // requires custom completion format -public class IndexStatsIT extends OpenSearchIntegTestCase { +public class IndexStatsIT extends ParameterizedOpenSearchIntegTestCase { + public IndexStatsIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java index e125492d4b2c5..2fbaf4ea5a4d3 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java @@ -8,8 +8,11 @@ package org.opensearch.script; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.rest.RestStatus; import org.opensearch.index.MockEngineFactoryPlugin; @@ -18,12 +21,13 @@ import org.opensearch.plugins.Plugin; import org.opensearch.search.MockSearchService; import org.opensearch.test.MockHttpTransport; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; import org.opensearch.test.store.MockFSIndexStore; import org.opensearch.test.transport.MockTransportService; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -31,9 +35,26 @@ import java.util.concurrent.ExecutionException; import java.util.function.Function; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.apache.logging.log4j.core.util.Throwables.getRootCause; -public class ScriptCacheIT extends OpenSearchIntegTestCase { +public class ScriptCacheIT extends ParameterizedOpenSearchIntegTestCase { + public ScriptCacheIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java b/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java index 42e515cca9b6b..a61102b9db144 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java @@ -31,23 +31,45 @@ package org.opensearch.search; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.English; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import java.util.Arrays; +import java.util.Collection; import java.util.concurrent.ExecutionException; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static 
org.opensearch.test.OpenSearchIntegTestCase.Scope.SUITE; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @ClusterScope(scope = SUITE) -public class StressSearchServiceReaperIT extends OpenSearchIntegTestCase { +public class StressSearchServiceReaperIT extends ParameterizedOpenSearchIntegTestCase { + public StressSearchServiceReaperIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Settings nodeSettings(int nodeOrdinal) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java index 61a5f76a32979..e42f12709c948 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java @@ -8,6 +8,8 @@ package org.opensearch.search.pit; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.LatchedActionListener; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; @@ -30,10 +32,12 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.junit.After; @@ -41,6 +45,8 @@ import org.junit.Before; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.LinkedList; import java.util.List; @@ -53,6 +59,7 @@ import static org.opensearch.action.search.PitTestsUtil.assertSegments; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; @@ -60,7 +67,23 @@ * Multi node integration tests for PIT creation and search operation with PIT ID. 
*/ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 2) -public class PitMultiNodeIT extends OpenSearchIntegTestCase { +public class PitMultiNodeIT extends ParameterizedOpenSearchIntegTestCase { + public PitMultiNodeIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Before public void setupIndex() throws ExecutionException, InterruptedException { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java index e081be0af51a2..34967528f2c4f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java @@ -32,10 +32,13 @@ package org.opensearch.search.scriptfilter; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexModule; @@ -47,6 +50,7 @@ import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -61,12 +65,29 @@ import static java.util.Collections.emptyMap; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.scriptQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class ScriptQuerySearchIT extends OpenSearchIntegTestCase { +public class ScriptQuerySearchIT extends ParameterizedOpenSearchIntegTestCase { + public ScriptQuerySearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() 
{ diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java index e0a54e9b4fc36..c7a6d18f881c6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java @@ -32,6 +32,7 @@ package org.opensearch.search.scroll; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.opensearch.action.index.IndexRequestBuilder; @@ -39,24 +40,44 @@ import org.opensearch.action.search.SearchType; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.sort.SortBuilder; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.Set; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; -public class DuelScrollIT extends OpenSearchIntegTestCase { +public class DuelScrollIT extends ParameterizedOpenSearchIntegTestCase { + public DuelScrollIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + public void testDuelQueryThenFetch() throws Exception { TestContext context = create(SearchType.DFS_QUERY_THEN_FETCH, SearchType.QUERY_THEN_FETCH); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java index aec6a03d3e57f..0eee136acac69 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.scroll; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.action.search.ClearScrollResponse; import org.opensearch.action.search.SearchPhaseExecutionException; @@ -43,6 +45,7 @@ import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; 
import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesReference; @@ -57,11 +60,13 @@ import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalTestCluster; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import org.junit.After; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Map; @@ -70,6 +75,7 @@ import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.queryStringQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -86,7 +92,24 @@ /** * Tests for scrolling. */ -public class SearchScrollIT extends OpenSearchIntegTestCase { +public class SearchScrollIT extends ParameterizedOpenSearchIntegTestCase { + public SearchScrollIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @After public void cleanup() throws Exception { assertAcked( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java index c6519cc3a0cb3..f16b9a4d67b49 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java @@ -32,26 +32,50 @@ package org.opensearch.search.scroll; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static 
org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThan; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) -public class SearchScrollWithFailingNodesIT extends OpenSearchIntegTestCase { +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2, numClientNodes = 0) +public class SearchScrollWithFailingNodesIT extends ParameterizedOpenSearchIntegTestCase { + public SearchScrollWithFailingNodesIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected int numberOfShards() { return 2; @@ -63,8 +87,6 @@ protected int numberOfReplicas() { } public void testScanScrollWithShardExceptions() throws Exception { - internalCluster().startNode(); - internalCluster().startNode(); assertAcked( prepareCreate("test") // Enforces that only one shard can only be allocated to a single node diff --git a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java index 22c0a9cbbab17..00ac574b8bd72 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.searchafter; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.CreatePitAction; @@ -43,30 +45,51 @@ import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.UUIDs; import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.SearchHit; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -public class 
SearchAfterIT extends OpenSearchIntegTestCase { +public class SearchAfterIT extends ParameterizedOpenSearchIntegTestCase { private static final String INDEX_NAME = "test"; private static final int NUM_DOCS = 100; + public SearchAfterIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + public void testsShouldFail() throws Exception { assertAcked(client().admin().indices().prepareCreate("test").setMapping("field1", "type=long", "field2", "type=keyword").get()); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java index 5a0ca1d13633e..7880fc24fd846 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.sort; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.Version; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; @@ -39,28 +41,47 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.DistanceUnit; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.GeoValidationMethod; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.concurrent.ExecutionException; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.sort.SortBuilders.fieldSort; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertOrderedSearchHits; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSortValues; import static org.hamcrest.Matchers.closeTo; -public class GeoDistanceSortBuilderIT extends OpenSearchIntegTestCase { +public class GeoDistanceSortBuilderIT extends ParameterizedOpenSearchIntegTestCase { + public GeoDistanceSortBuilderIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), 
true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } private static final String LOCATION_FIELD = "location"; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java index 5b896f9a1fe57..7bcded86fcaa8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java @@ -8,21 +8,42 @@ package org.opensearch.search.sort; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.sort.plugin.CustomSortBuilder; import org.opensearch.search.sort.plugin.CustomSortPlugin; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.equalTo; -public class SortFromPluginIT extends OpenSearchIntegTestCase { +public class SortFromPluginIT extends ParameterizedOpenSearchIntegTestCase { + public SortFromPluginIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java index 7183f18acbadf..30dba87f8ef5d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java @@ -31,6 +31,7 @@ package org.opensearch.search.suggest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import org.apache.lucene.analysis.TokenStreamToAutomaton; @@ -47,6 +48,7 @@ import org.opensearch.common.FieldMemoryStats; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperParsingException; @@ -63,7 +65,7 @@ import org.opensearch.search.suggest.completion.context.ContextMapping; import 
org.opensearch.search.suggest.completion.context.GeoContextMapping; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -81,6 +83,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.core.common.util.CollectionUtils.iterableAsArrayList; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasId; @@ -96,7 +99,24 @@ import static org.hamcrest.Matchers.notNullValue; @SuppressCodecs("*") // requires custom completion format -public class CompletionSuggestSearchIT extends OpenSearchIntegTestCase { +public class CompletionSuggestSearchIT extends ParameterizedOpenSearchIntegTestCase { + public CompletionSuggestSearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + private final String INDEX = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); private final String FIELD = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); private final CompletionMappingBuilder completionMappingBuilder = new CompletionMappingBuilder(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java index 7f5e8abfc3b52..bac3e7fb61683 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -31,6 +31,7 @@ package org.opensearch.search.suggest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; @@ -40,6 +41,7 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; @@ -51,11 +53,12 @@ import org.opensearch.search.suggest.completion.context.ContextMapping; import org.opensearch.search.suggest.completion.context.GeoContextMapping; import org.opensearch.search.suggest.completion.context.GeoQueryContext; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import 
java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; @@ -64,12 +67,29 @@ import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.core.IsEqual.equalTo; @SuppressCodecs("*") // requires custom completion format -public class ContextCompletionSuggestSearchIT extends OpenSearchIntegTestCase { +public class ContextCompletionSuggestSearchIT extends ParameterizedOpenSearchIntegTestCase { + public ContextCompletionSuggestSearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } private final String INDEX = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); private final String FIELD = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java index 017dd5ea668de..32bb0e34054bb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.suggest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; @@ -39,6 +41,7 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; @@ -54,7 +57,7 @@ import org.opensearch.search.suggest.phrase.StupidBackoff; import org.opensearch.search.suggest.term.TermSuggestionBuilder; import org.opensearch.search.suggest.term.TermSuggestionBuilder.SuggestMode; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.IOException; @@ -73,6 +76,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.index.query.QueryBuilders.matchQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static 
org.opensearch.search.suggest.SuggestBuilders.phraseSuggestion; import static org.opensearch.search.suggest.SuggestBuilders.termSuggestion; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -92,7 +96,23 @@ * possible these tests should declare for the first request, make the request, modify the configuration for the next request, make that * request, modify again, request again, etc. This makes it very obvious what changes between requests. */ -public class SuggestSearchIT extends OpenSearchIntegTestCase { +public class SuggestSearchIT extends ParameterizedOpenSearchIntegTestCase { + public SuggestSearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } // see #3196 public void testSuggestAcrossMultipleIndices() throws IOException { diff --git a/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java b/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java index 929aac388b678..8c9bff9833462 100644 --- a/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java @@ -32,17 +32,41 @@ package org.opensearch.similarity; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; -public class SimilarityIT extends OpenSearchIntegTestCase { +public class SimilarityIT extends ParameterizedOpenSearchIntegTestCase { + public SimilarityIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + public void testCustomBM25Similarity() throws Exception { try { client().admin().indices().prepareDelete("test").execute().actionGet(); From 74fcaab442ba6dec314ac31615f814909bc78297 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Fri, 15 Sep 2023 16:14:58 +0530 Subject: [PATCH 05/23] Fix flaky behavior of 
RemoteStoreRestoreIT.testRateLimitedRemoteDownloads (#9887) * Fix flaky behavior of RemoteStoreRestoreIT.testRateLimitedRemoteDownloads Signed-off-by: Sachin Kale --- .../opensearch/remotestore/RemoteStoreBaseIntegTestCase.java | 2 +- .../org/opensearch/remotestore/RemoteStoreRestoreIT.java | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 9a684ce0a1482..621fb262c0a91 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -230,7 +230,7 @@ public static Settings buildRemoteStoreNodeAttributes( if (withRateLimiterAttributes) { settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) - .put(segmentRepoSettingsAttributeKeyPrefix + "max_remote_download_bytes_per_sec", "2kb") + .put(segmentRepoSettingsAttributeKeyPrefix + "max_remote_download_bytes_per_sec", "4kb") .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java index 489f4c52d4298..65335f444a2df 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java @@ -406,9 +406,10 @@ public void testRateLimitedRemoteDownloads() throws Exception { for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { downloadPauseTime += repositoriesService.repository(REPOSITORY_NAME).getRemoteDownloadThrottleTimeInNanos(); } - assertThat(downloadPauseTime, greaterThan(TimeValue.timeValueSeconds(randomIntBetween(5, 10)).nanos())); + assertThat(downloadPauseTime, greaterThan(TimeValue.timeValueSeconds(randomIntBetween(3, 5)).nanos())); }, 30, TimeUnit.SECONDS); - ensureGreen(INDEX_NAME); + // Waiting for extended period for green state so that rate limit does not cause flakiness + ensureGreen(TimeValue.timeValueSeconds(120), INDEX_NAME); // This is required to get updated number from already active shards which were not restored assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); assertEquals(0, getNumShards(INDEX_NAME).numReplicas); From 483c2e36f6c8f2e326279b3b0e598d21072bdc14 Mon Sep 17 00:00:00 2001 From: Gagan Juneja Date: Fri, 15 Sep 2023 21:15:24 +0530 Subject: [PATCH 06/23] Add tracing instrumentation in transport service (#10042) * Add instrumentation to transport service Signed-off-by: Gagan Juneja * Add instrumentation to transport service Signed-off-by: Gagan Juneja * Add javadoc Signed-off-by: Gagan Juneja --------- Signed-off-by: Gagan Juneja Co-authored-by: Gagan Juneja --- CHANGELOG.md | 1 + .../TelemetryTracerEnabledSanityIT.java | 2 +- .../single/SingleNodeDiscoveryIT.java | 2 + .../TraceableTransportResponseHandler.java | 107 ++++++++++++++++++ .../tracing/handler/package-info.java | 12 ++ .../transport/TransportResponseHandler.java | 7 ++ .../transport/TransportService.java | 99 +++++++++------- .../test/OpenSearchIntegTestCase.java | 3 +- 8 files changed, 188 insertions(+), 45 deletions(-) create mode 100644 
server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java create mode 100644 server/src/main/java/org/opensearch/telemetry/tracing/handler/package-info.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a52c3d7f2a3f..08f3c3abd4162 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -92,6 +92,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) - Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916)) - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) +- Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042)) ### Deprecated diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java index 2d0111e64faad..8a49a0abf5512 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java @@ -89,7 +89,7 @@ public void testSanityChecksWhenTracingEnabled() throws Exception { InMemorySingletonSpanExporter exporter = InMemorySingletonSpanExporter.INSTANCE; if (!exporter.getFinishedSpanItems().isEmpty()) { - validators.validate(exporter.getFinishedSpanItems(), 2); + validators.validate(exporter.getFinishedSpanItems(), 6); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/single/SingleNodeDiscoveryIT.java index 90bdcf7fded11..1f6c8eac6c391 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/single/SingleNodeDiscoveryIT.java @@ -76,6 +76,7 @@ public void testSingleNodesDoNotDiscoverEachOther() throws IOException, Interrup @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() + .put(featureFlagSettings()) .put("discovery.type", "single-node") .put("transport.type", getTestTransportType()) /* @@ -142,6 +143,7 @@ public boolean innerMatch(final LogEvent event) { @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() + .put(featureFlagSettings()) .put("discovery.type", "zen") .put("transport.type", getTestTransportType()) .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java b/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java new file mode 100644 index 0000000000000..abddfcc6cebc1 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
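A note on the SingleNodeDiscoveryIT hunks above: a test that overrides nodeSettings() replaces the harness defaults wholesale, so feature-flag settings supplied by the test framework must be merged back in explicitly or they are silently dropped. A minimal sketch of the idiom the patch applies (other settings elided):

@Override
public Settings nodeSettings(int nodeOrdinal) {
    return Settings.builder()
        .put(featureFlagSettings())      // re-apply harness feature flags the override would otherwise lose
        .put("discovery.type", "single-node")
        .put("transport.type", getTestTransportType())
        .build();
}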
+ */ + +package org.opensearch.telemetry.tracing.handler; + +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponseHandler; + +import java.io.IOException; +import java.util.Objects; + +/** + * Tracer wrapped {@link TransportResponseHandler} + * @param <T> TransportResponse + */ +public class TraceableTransportResponseHandler<T extends TransportResponse> implements TransportResponseHandler<T> { + + private final Span span; + private final TransportResponseHandler<T> delegate; + private final Tracer tracer; + + /** + * Constructor. + * + * @param delegate delegate + * @param span span + * @param tracer tracer + */ + private TraceableTransportResponseHandler(TransportResponseHandler<T> delegate, Span span, Tracer tracer) { + this.delegate = Objects.requireNonNull(delegate); + this.span = Objects.requireNonNull(span); + this.tracer = Objects.requireNonNull(tracer); + } + + /** + * Factory method. + * @param delegate delegate + * @param span span + * @param tracer tracer + * @return transportResponseHandler + */ + public static <S extends TransportResponse> TransportResponseHandler<S> create( + TransportResponseHandler<S> delegate, + Span span, + Tracer tracer + ) { + if (FeatureFlags.isEnabled(FeatureFlags.TELEMETRY) == true) { + return new TraceableTransportResponseHandler<S>(delegate, span, tracer); + } else { + return delegate; + } + } + + @Override + public T read(StreamInput in) throws IOException { + return delegate.read(in); + } + + @Override + public void handleResponse(T response) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.handleResponse(response); + } finally { + span.endSpan(); + } + } + + @Override + public void handleException(TransportException exp) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.handleException(exp); + } finally { + span.setError(exp); + span.endSpan(); + } + } + + @Override + public String executor() { + return delegate.executor(); + } + + @Override + public String toString() { + return delegate.toString(); + } + + @Override + public void handleRejection(Exception exp) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.handleRejection(exp); + } finally { + span.endSpan(); + } + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/handler/package-info.java b/server/src/main/java/org/opensearch/telemetry/tracing/handler/package-info.java new file mode 100644 index 0000000000000..ff9f8f57dc07c --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/handler/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for tracing requests.
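The wrapper above is wired in at the sendRequest call site in the TransportService hunk further down; condensed to its essence, the intended usage is the following (a sketch, with the surrounding method, generics, and error handling elided):

final Span span = tracer.startSpan(SpanBuilder.from(action, connection));
try (SpanScope spanScope = tracer.withSpanInScope(span)) {
    // create() returns the delegate unwrapped when the TELEMETRY feature flag is
    // off, so the default configuration pays no extra indirection.
    TransportResponseHandler<T> traced = TraceableTransportResponseHandler.create(handler, span, tracer);
    asyncSender.sendRequest(connection, action, request, options, traced);
    // Note that the span is not ended here: the traced handler ends it when the
    // response, an exception, or a rejection arrives, so it covers the full round trip.
}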
+ */ +package org.opensearch.telemetry.tracing.handler; diff --git a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java index 0b39983cc3bee..90e94e52515ce 100644 --- a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java +++ b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java @@ -52,6 +52,13 @@ public interface TransportResponseHandler extends W String executor(); + /** + * This method should be handling the rejection/failure scenarios where connection to the node is rejected or failed. + * It should be used to clear up the resources held by the {@link TransportResponseHandler}. + * @param exp exception + */ + default void handleRejection(Exception exp) {}; + default TransportResponseHandler wrap(Function converter, Writeable.Reader reader) { final TransportResponseHandler self = this; return new TransportResponseHandler() { diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index 52274872e8cc8..32bedb52a9cef 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -66,7 +66,11 @@ import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.handler.TraceableTransportResponseHandler; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; @@ -333,6 +337,7 @@ protected void doStop() { getExecutorService().execute(new AbstractRunnable() { @Override public void onRejection(Exception e) { + holderToNotify.handler().handleRejection(e); // if we get rejected during node shutdown we don't wanna bubble it up logger.debug( () -> new ParameterizedMessage( @@ -345,6 +350,7 @@ public void onRejection(Exception e) { @Override public void onFailure(Exception e) { + holderToNotify.handler().handleRejection(e); logger.warn( () -> new ParameterizedMessage( "failed to notify response handler on exception, action: {}", @@ -861,53 +867,60 @@ public final void sendRequest( final TransportRequestOptions options, final TransportResponseHandler handler ) { - try { - logger.debug("Action: " + action); - final TransportResponseHandler delegate; - if (request.getParentTask().isSet()) { - // TODO: capture the connection instead so that we can cancel child tasks on the remote connections. 
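The new default handleRejection() hook added to TransportResponseHandler in the hunk above exists so that a handler holding resources (a span, a child-task registration) can release them even when the notification never reaches its executor. A hypothetical implementation, as a fragment (ClusterStateResponse and ThreadPool.Names.SAME are chosen only for illustration; any TransportResponse type works):

TransportResponseHandler<ClusterStateResponse> handler = new TransportResponseHandler<ClusterStateResponse>() {
    @Override
    public ClusterStateResponse read(StreamInput in) throws IOException {
        return new ClusterStateResponse(in);
    }

    @Override
    public void handleResponse(ClusterStateResponse response) {
        // normal completion path
    }

    @Override
    public void handleException(TransportException exp) {
        // remote or transport failure path
    }

    @Override
    public void handleRejection(Exception exp) {
        // invoked when the executor rejects the notification, e.g. during node
        // shutdown; release whatever handleResponse/handleException would have freed
    }

    @Override
    public String executor() {
        return ThreadPool.Names.SAME;
    }
};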
- final Releasable unregisterChildNode = taskManager.registerChildNode(request.getParentTask().getId(), connection.getNode()); - delegate = new TransportResponseHandler() { - @Override - public void handleResponse(T response) { - unregisterChildNode.close(); - handler.handleResponse(response); - } + final Span span = tracer.startSpan(SpanBuilder.from(action, connection)); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + TransportResponseHandler traceableTransportResponseHandler = TraceableTransportResponseHandler.create(handler, span, tracer); + try { + logger.debug("Action: " + action); + final TransportResponseHandler delegate; + if (request.getParentTask().isSet()) { + // TODO: capture the connection instead so that we can cancel child tasks on the remote connections. + final Releasable unregisterChildNode = taskManager.registerChildNode( + request.getParentTask().getId(), + connection.getNode() + ); + delegate = new TransportResponseHandler() { + @Override + public void handleResponse(T response) { + unregisterChildNode.close(); + traceableTransportResponseHandler.handleResponse(response); + } - @Override - public void handleException(TransportException exp) { - unregisterChildNode.close(); - handler.handleException(exp); - } + @Override + public void handleException(TransportException exp) { + unregisterChildNode.close(); + traceableTransportResponseHandler.handleException(exp); + } - @Override - public String executor() { - return handler.executor(); - } + @Override + public String executor() { + return traceableTransportResponseHandler.executor(); + } - @Override - public T read(StreamInput in) throws IOException { - return handler.read(in); - } + @Override + public T read(StreamInput in) throws IOException { + return traceableTransportResponseHandler.read(in); + } - @Override - public String toString() { - return getClass().getName() + "/[" + action + "]:" + handler.toString(); - } - }; - } else { - delegate = handler; - } - asyncSender.sendRequest(connection, action, request, options, delegate); - } catch (final Exception ex) { - // the caller might not handle this so we invoke the handler - final TransportException te; - if (ex instanceof TransportException) { - te = (TransportException) ex; - } else { - te = new TransportException("failure to send", ex); + @Override + public String toString() { + return getClass().getName() + "/[" + action + "]:" + handler.toString(); + } + }; + } else { + delegate = traceableTransportResponseHandler; + } + asyncSender.sendRequest(connection, action, request, options, delegate); + } catch (final Exception ex) { + // the caller might not handle this so we invoke the handler + final TransportException te; + if (ex instanceof TransportException) { + te = (TransportException) ex; + } else { + te = new TransportException("failure to send", ex); + } + traceableTransportResponseHandler.handleException(te); } - handler.handleException(te); } } @@ -1017,6 +1030,7 @@ private void sendRequestInternal( threadPool.executor(executor).execute(new AbstractRunnable() { @Override public void onRejection(Exception e) { + contextToNotify.handler().handleRejection(e); // if we get rejected during node shutdown we don't wanna bubble it up logger.debug( () -> new ParameterizedMessage( @@ -1029,6 +1043,7 @@ public void onRejection(Exception e) { @Override public void onFailure(Exception e) { + contextToNotify.handler().handleRejection(e); logger.warn( () -> new ParameterizedMessage( "failed to notify response handler on exception, action: {}", diff --git 
a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 25f453fe024ff..6e064f943ca07 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -2300,9 +2300,8 @@ public static void afterClass() throws Exception { INSTANCE.printTestMessage("cleaning up after"); INSTANCE.afterInternal(true); checkStaticState(true); - StrictCheckSpanProcessor.validateTracingStateOnShutdown(); } - + StrictCheckSpanProcessor.validateTracingStateOnShutdown(); } finally { SUITE_SEED = null; currentCluster = null; From 4a4a8fa6c04fe57ef5037fd0d49c1c416a1f3e24 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Fri, 15 Sep 2023 22:00:28 +0530 Subject: [PATCH 07/23] Always create empty translog on replica for remote store enabled index (#10012) Signed-off-by: Sachin Kale --- .../opensearch/remotestore/RemoteStoreIT.java | 70 +++++++++++++++++++ .../opensearch/index/shard/IndexShard.java | 44 ++++++++---- .../index/translog/TranslogHeader.java | 6 +- 3 files changed, 107 insertions(+), 13 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index bd019693f01ff..3ccf563941f9c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -8,6 +8,7 @@ package org.opensearch.remotestore; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexResponse; @@ -33,16 +34,20 @@ import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.index.shard.RemoteStoreRefreshListener.LAST_N_METADATA_FILES_TO_KEEP; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.comparesEqualTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.oneOf; @@ -346,4 +351,69 @@ private void clearClusterBufferIntervalSetting(String clusterManagerName) { .setTransientSettings(Settings.builder().putNull(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey())) .get(); } + + public void testRestoreSnapshotToIndexWithSameNameDifferentUUID() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + List dataNodes = internalCluster().startDataOnlyNodes(2); + + Path absolutePath = randomRepoPath().toAbsolutePath(); + assertAcked( + clusterAdmin().preparePutRepository("test-repo").setType("fs").setSettings(Settings.builder().put("location", absolutePath)) + ); + + 
logger.info("--> Create index and ingest 50 docs"); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1)); + indexBulk(INDEX_NAME, 50); + flushAndRefresh(INDEX_NAME); + + String originalIndexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + assertNotNull(originalIndexUUID); + assertNotEquals(IndexMetadata.INDEX_UUID_NA_VALUE, originalIndexUUID); + + ensureGreen(); + + logger.info("--> take a snapshot"); + client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setIndices(INDEX_NAME).setWaitForCompletion(true).get(); + + logger.info("--> wipe all indices"); + cluster().wipeIndices(INDEX_NAME); + + logger.info("--> Create index with the same name, different UUID"); + assertAcked( + prepareCreate(INDEX_NAME).setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 1)) + ); + + ensureGreen(TimeValue.timeValueSeconds(30), INDEX_NAME); + + String newIndexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + assertNotNull(newIndexUUID); + assertNotEquals(IndexMetadata.INDEX_UUID_NA_VALUE, newIndexUUID); + assertNotEquals(newIndexUUID, originalIndexUUID); + + logger.info("--> close index"); + client().admin().indices().prepareClose(INDEX_NAME).get(); + + logger.info("--> restore all indices from the snapshot"); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .execute() + .actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + + flushAndRefresh(INDEX_NAME); + + ensureGreen(INDEX_NAME); + assertBusy(() -> { + assertHitCount(client(dataNodes.get(0)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); + assertHitCount(client(dataNodes.get(1)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); + }); + } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 8ed75330f938e..34c5ed2112482 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -196,6 +196,7 @@ import java.io.IOException; import java.io.PrintStream; import java.nio.channels.ClosedByInterruptException; +import java.nio.channels.FileChannel; import java.nio.charset.StandardCharsets; import java.nio.file.NoSuchFileException; import java.util.ArrayList; @@ -2355,19 +2356,38 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier, b synchronized (engineMutex) { assert currentEngineReference.get() == null : "engine is running"; verifyNotClosed(); - if (indexSettings.isRemoteStoreEnabled() && syncFromRemote) { - syncSegmentsFromRemoteSegmentStore(false); - } - if (indexSettings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { + if (indexSettings.isRemoteStoreEnabled()) { + // Download missing segments from remote segment store. if (syncFromRemote) { - syncRemoteTranslogAndUpdateGlobalCheckpoint(); - } else { - // we will enter this block when we do not want to recover from remote translog. - // currently only during snapshot restore, we are coming into this block. - // here, as while initiliazing remote translog we cannot skip downloading translog files, - // so before that step, we are deleting the translog files present in remote store. 
- deleteTranslogFilesFromRemoteTranslog(); - + syncSegmentsFromRemoteSegmentStore(false); + } + if (shardRouting.primary()) { + if (syncFromRemote) { + syncRemoteTranslogAndUpdateGlobalCheckpoint(); + } else { + // we will enter this block when we do not want to recover from remote translog. + // currently only during snapshot restore, we are coming into this block. + // here, as while initializing remote translog we cannot skip downloading translog files, + // so before that step, we are deleting the translog files present in remote store. + deleteTranslogFilesFromRemoteTranslog(); + } + } else if (syncFromRemote) { + // For replicas, when we download segments from remote segment store, we need to make sure that the local + // translog has the same UUID that is referred to by the segments. If they are different, the engine open + // fails with TranslogCorruptedException. It is safe to create an empty translog for remote store enabled + // indices as a replica would only need to read the translog in a failover scenario, and we always fetch data + // from the remote translog at the time of failover. + final SegmentInfos lastCommittedSegmentInfos = store().readLastCommittedSegmentsInfo(); + final String translogUUID = lastCommittedSegmentInfos.userData.get(TRANSLOG_UUID_KEY); + final long checkpoint = Long.parseLong(lastCommittedSegmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + Translog.createEmptyTranslog( + shardPath().resolveTranslog(), + shardId(), + checkpoint, + getPendingPrimaryTerm(), + translogUUID, + FileChannel::open + ); } } // we must create a new engine under mutex (see IndexShard#snapshotStoreMetadata). diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java b/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java index 42bda11d75783..7b5be9505f27a 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java @@ -147,7 +147,11 @@ static TranslogHeader read(final String translogUUID, final Path path, final Fil if (actualUUID.bytesEquals(expectedUUID) == false) { throw new TranslogCorruptedException( path.toString(), - "expected shard UUID " + expectedUUID + " but got: " + actualUUID + " this translog file belongs to a different translog" + "expected shard UUID " + + translogUUID + + " but got: " + + translogHeader.translogUUID + + " this translog file belongs to a different translog" ); } return translogHeader; From d34b35272fa19bc53cb0f6843a92d2d1ee4e3b47 Mon Sep 17 00:00:00 2001 From: Chaitanya Gohel <104654647+gashutos@users.noreply.github.com> Date: Fri, 15 Sep 2023 22:03:25 +0530 Subject: [PATCH 08/23] Fix broken backward compatibility from 2.7 for IndexSorted field indices (#10045) * Fix broken backward compatibility from 2.7 for IndexSorted field indices Signed-off-by: gashutos * Adding CHANGELOG Signed-off-by: gashutos * Update server/src/main/java/org/opensearch/index/IndexSettings.java Co-authored-by: Andriy Redko Signed-off-by: Chaitanya Gohel <104654647+gashutos@users.noreply.github.com> * Removing unwanted logs Signed-off-by: gashutos * Removing unwanted logs Signed-off-by: gashutos * Adding index sort as part of mixed cluster to test this scenario Signed-off-by: gashutos * Removing optimization disable logic Signed-off-by: gashutos * Correcting some comments & version check to before(V_2_7_0) instead of onOrBefore(V_2_6_1) Signed-off-by: gashutos * Resolving spotless check error Signed-off-by: gashutos * Fixing broken UT - 
last-minute check-in without compiling Signed-off-by: gashutos * Improving code coverage to make codecov happy Signed-off-by: gashutos * Correcting typos and adding more tests Signed-off-by: gashutos * Removing unwanted imports Signed-off-by: gashutos --------- Signed-off-by: gashutos Signed-off-by: Chaitanya Gohel <104654647+gashutos@users.noreply.github.com> Co-authored-by: Andriy Redko --- CHANGELOG.md | 1 + .../org/opensearch/backwards/IndexingIT.java | 32 ++++++-- .../org/opensearch/index/IndexSettings.java | 17 +++++ .../org/opensearch/index/IndexSortConfig.java | 8 +- .../index/fielddata/IndexFieldData.java | 7 ++ .../fielddata/IndexNumericFieldData.java | 21 +++++- .../opensearch/index/IndexServiceTests.java | 74 +++++++++++++++++++ .../AbstractFieldDataImplTestCase.java | 22 ++++++ 8 files changed, 173 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 08f3c3abd4162..633f005a0883d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -100,6 +100,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Fixed - Fix ignore_missing parameter has no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725)) +- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045)) ### Security diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java index 686fc78dcec8a..13c2daeec37af 100644 --- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java @@ -67,13 +67,14 @@ public class IndexingIT extends OpenSearchRestTestCase { protected static final Version UPGRADE_FROM_VERSION = Version.fromString(System.getProperty("tests.upgrade_from_version")); + private static final String TEST_MAPPING = createTestMapping(); private int indexDocs(String index, final int idStart, final int numDocs) throws IOException { for (int i = 0; i < numDocs; i++) { final int id = idStart + i; Request request = new Request("PUT", index + "/_doc/" + id); - request.setJsonEntity("{\"test\": \"test_" + randomAlphaOfLength(2) + "\"}"); + request.setJsonEntity("{\"test\": \"test_" + randomAlphaOfLength(2) + "\", \"sortfield\": \""+ randomIntBetween(0, numDocs) + "\"}"); assertOK(client().performRequest(request)); } return numDocs; } @@ -129,9 +130,10 @@ public void testIndexingWithPrimaryOnBwcNodes() throws Exception { .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .putList("index.sort.field", "sortfield") .put("index.routing.allocation.include._name", bwcNames); final String index = "test-index"; - createIndex(index, settings.build()); + createIndex(index, settings.build(), TEST_MAPPING); ensureNoInitializingShards(); // wait for all other shard activity to finish int docCount = 200; @@ -178,9 +180,10 @@ public void testIndexingWithReplicaOnBwcNodes() throws Exception { .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .putList("index.sort.field", "sortfield") .put("index.routing.allocation.exclude._name", bwcNames); final String index = 
"test-index"; - createIndex(index, settings.build()); + createIndex(index, settings.build(), TEST_MAPPING); ensureNoInitializingShards(); // wait for all other shard activity to finish printClusterRouting(); @@ -214,11 +217,12 @@ public void testIndexVersionPropagation() throws Exception { Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) + .putList("index.sort.field", "sortfield") .put("index.routing.allocation.include._name", bwcNames); final String index = "indexversionprop"; final int minUpdates = 5; final int maxUpdates = 10; - createIndex(index, settings.build()); + createIndex(index, settings.build(), TEST_MAPPING); try (RestClient newNodeClient = buildClient(restClientSettings(), nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { @@ -300,10 +304,11 @@ public void testSeqNoCheckpoints() throws Exception { Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) + .putList("index.sort.field", "sortfield") .put("index.routing.allocation.include._name", bwcNames); final String index = "test"; - createIndex(index, settings.build()); + createIndex(index, settings.build(), TEST_MAPPING); try (RestClient newNodeClient = buildClient(restClientSettings(), nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { int numDocs = 0; @@ -382,10 +387,11 @@ public void testUpdateSnapshotStatus() throws Exception { Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), between(5, 10)) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + .putList("index.sort.field", "sortfield") .put("index.routing.allocation.include._name", bwcNames); final String index = "test-snapshot-index"; - createIndex(index, settings.build()); + createIndex(index, settings.build(), TEST_MAPPING); indexDocs(index, 0, between(50, 100)); ensureGreen(index); assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); @@ -419,7 +425,8 @@ public void testSyncedFlushTransition() throws Exception { createIndex(index, Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) - .put("index.routing.allocation.include._name", newNodes).build()); + .putList("index.sort.field", "sortfield") + .put("index.routing.allocation.include._name", newNodes).build(), TEST_MAPPING); ensureGreen(index); indexDocs(index, randomIntBetween(0, 100), between(1, 100)); try (RestClient oldNodeClient = buildClient(restClientSettings(), @@ -664,4 +671,15 @@ public String toString() { '}'; } } + + private static String createTestMapping() { + return " \"properties\": {\n" + + " \"test\": {\n" + + " \"type\": \"text\"\n" + + " },\n" + + " \"sortfield\": {\n" + + " \"type\": \"integer\"\n" + + " }\n" + + " }"; + } } diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 03c71351294d5..1e4224c314f05 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -63,6 +63,7 @@ import java.util.function.Function; import java.util.function.UnaryOperator; +import static org.opensearch.Version.V_2_7_0; import static 
org.opensearch.common.util.FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING; @@ -660,6 +661,7 @@ public final class IndexSettings { private volatile long retentionLeaseMillis; private volatile String defaultSearchPipeline; + private final boolean widenIndexSortType; /** * The maximum age of a retention lease before it is considered expired. @@ -857,6 +859,13 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti mergeOnFlushEnabled = scopedSettings.get(INDEX_MERGE_ON_FLUSH_ENABLED); setMergeOnFlushPolicy(scopedSettings.get(INDEX_MERGE_ON_FLUSH_POLICY)); defaultSearchPipeline = scopedSettings.get(DEFAULT_SEARCH_PIPELINE); + /* An unintentional breaking change was introduced with [OpenSearch-6424](https://github.com/opensearch-project/OpenSearch/pull/6424) (version 2.7). + * For indices created prior to version 2.7 that define an index sort, the SortField.Type used to be cast + * to a wider type, e.g. integer to long. From OpenSearch 2.7 onwards the SortField is no longer + * widened, in order to gain some sort query optimizations. + * Since this sortField (IndexSort) is stored in SegmentInfo, we need to maintain backward compatibility for such indices. + */ + widenIndexSortType = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).before(V_2_7_0); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); scopedSettings.addSettingsUpdateConsumer( @@ -1652,4 +1661,12 @@ public String getDefaultSearchPipeline() { public void setDefaultSearchPipeline(String defaultSearchPipeline) { this.defaultSearchPipeline = defaultSearchPipeline; } + + /** + * Returns true if we need to maintain backward compatibility for index sorted indices created prior to version 2.7 + * @return boolean + */ + public boolean shouldWidenIndexSortType() { + return this.widenIndexSortType; + } } diff --git a/server/src/main/java/org/opensearch/index/IndexSortConfig.java b/server/src/main/java/org/opensearch/index/IndexSortConfig.java index f73f96df4f9ad..83192052564f3 100644 --- a/server/src/main/java/org/opensearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/opensearch/index/IndexSortConfig.java @@ -143,6 +143,7 @@ private static MultiValueMode parseMultiValueMode(String value) { // visible for tests final FieldSortSpec[] sortSpecs; + final boolean shouldWidenIndexSortType; public IndexSortConfig(IndexSettings indexSettings) { final Settings settings = indexSettings.getSettings(); @@ -182,6 +183,7 @@ public IndexSortConfig(IndexSettings indexSettings) { sortSpecs[i].missingValue = missingValues.get(i); } } + this.shouldWidenIndexSortType = indexSettings.shouldWidenIndexSortType(); } /** @@ -230,7 +232,11 @@ public Sort buildIndexSort( if (fieldData == null) { throw new IllegalArgumentException("docvalues not found for index sort field:[" + sortSpec.field + "]"); } - sortFields[i] = fieldData.sortField(sortSpec.missingValue, mode, null, reverse); + if (this.shouldWidenIndexSortType == true) { + sortFields[i] = fieldData.wideSortField(sortSpec.missingValue, mode, null, reverse); + } else { + sortFields[i] = fieldData.sortField(sortSpec.missingValue, mode, null, reverse); + } validateIndexSortField(sortFields[i]); } return new Sort(sortFields); diff --git 
a/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java index f9db28a2c56fe..81d4ce2dd8772 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java @@ -94,6 +94,13 @@ public interface IndexFieldData { */ SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse); + /** + * Returns the {@link SortField} to use for index sorting where we widen the sort field type to a type of greater or equal width. + */ + default SortField wideSortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { + return sortField(missingValue, sortMode, nested, reverse); + } + /** * Build a sort implementation specialized for aggregations. */ diff --git a/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java index ae8ffd8fe6b97..b4e90b8ab570a 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java @@ -151,6 +151,25 @@ public final SortField sortField(Object missingValue, MultiValueMode sortMode, N return sortField(getNumericType(), missingValue, sortMode, nested, reverse); } + @Override + public final SortField wideSortField(Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { + // This is to support backward compatibility: the minimum sort field width prior to OpenSearch 2.7 was 16 bytes, + // i.e. all sort fields were upcast to Long/Double with 16 bytes. + // From OpenSearch 2.7, the minimum width for a sort field is 8 bytes, so if it comes in as SortField INT, + // we need to upcast it to LONG to match the backward compatibility info stored in the segment info + if (getNumericType().sortFieldType == SortField.Type.INT) { + XFieldComparatorSource source = comparatorSource(NumericType.LONG, missingValue, sortMode, nested); + SortedNumericSelector.Type selectorType = sortMode == MultiValueMode.MAX + ? SortedNumericSelector.Type.MAX + : SortedNumericSelector.Type.MIN; + SortField sortField = new SortedNumericSortField(getFieldName(), SortField.Type.LONG, reverse, selectorType); + sortField.setMissingValue(source.missingObject(missingValue, reverse)); + return sortField; + } + // If already wider than INT, no upcast is needed. + return sortField(getNumericType(), missingValue, sortMode, nested, reverse); + } + /** * Builds a {@linkplain BucketedSort} for the {@code targetNumericType}, * casting the values if their native type doesn't match. 
@@ -224,7 +243,7 @@ private XFieldComparatorSource comparatorSource( source = new IntValuesComparatorSource(this, missingValue, sortMode, nested); } if (targetNumericType != getNumericType()) { - source.disableSkipping(); // disable skipping logic for caste of sort field + source.disableSkipping(); // disable skipping logic for cast of sort field } return source; } diff --git a/server/src/test/java/org/opensearch/index/IndexServiceTests.java b/server/src/test/java/org/opensearch/index/IndexServiceTests.java index 1b8e1abb1bf1b..db9f4bd305c79 100644 --- a/server/src/test/java/org/opensearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/opensearch/index/IndexServiceTests.java @@ -33,7 +33,9 @@ package org.opensearch.index; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; +import org.opensearch.Version; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.compress.CompressedXContent; @@ -526,4 +528,76 @@ public void testUpdateRemoteTranslogBufferIntervalDynamically() { indexMetadata = client().admin().cluster().prepareState().execute().actionGet().getState().metadata().index("test"); assertEquals("20s", indexMetadata.getSettings().get(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey())); } + + public void testIndexSort() { + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "0ms") // disable + .putList("index.sort.field", "sortfield") + .build(); + try { + // Integer index sort should retain the int sort type + IndexService index = createIndex("test", settings, createTestMapping("integer")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.INT); + + // Long index sort should retain the long sort type + index = createIndex("test", settings, createTestMapping("long")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.LONG); + + // Float index sort should retain the float sort type + index = createIndex("test", settings, createTestMapping("float")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.FLOAT); + + // Double index sort should retain the double sort type + index = createIndex("test", settings, createTestMapping("double")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.DOUBLE); + + // String index sort should retain the string sort type + index = createIndex("test", settings, createTestMapping("string")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.STRING); + } catch (IllegalArgumentException ex) { + assertEquals("failed to parse value [0ms] for setting [index.translog.sync_interval], must be >= [100ms]", ex.getMessage()); + } + } + + public void testIndexSortBackwardCompatible() { + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "0ms") // disable + .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_2_6_1) + .putList("index.sort.field", "sortfield") + .build(); + try { + // Integer index sort should be converted to long sort type + IndexService index = createIndex("test", settings, createTestMapping("integer")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.LONG); + + // Long index sort 
should retain the long sort type + index = createIndex("test", settings, createTestMapping("long")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.LONG); + + // Float index sort should retain the float sort type + index = createIndex("test", settings, createTestMapping("float")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.FLOAT); + + // Double index sort should retain the double sort type + index = createIndex("test", settings, createTestMapping("double")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.DOUBLE); + + // String index sort should retain the string sort type + index = createIndex("test", settings, createTestMapping("string")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.STRING); + } catch (IllegalArgumentException ex) { + assertEquals("failed to parse value [0ms] for setting [index.translog.sync_interval], must be >= [100ms]", ex.getMessage()); + } + } + + private static String createTestMapping(String type) { + return " \"properties\": {\n" + " \"test\": {\n" + " \"type\": \"text\"\n" + " },\n" + " \"sortfield\": {\n" + " \"type\": \"" + type + "\"\n" + " }\n" + " }"; + } } diff --git a/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataImplTestCase.java b/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataImplTestCase.java index 1ffacf98a6836..2b44e759f4ff9 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataImplTestCase.java +++ b/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataImplTestCase.java @@ -39,6 +39,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.util.BytesRef; import org.opensearch.core.common.Strings; @@ -144,6 +145,27 @@ public void testSingleValueAllSet() throws Exception { } } + + public void testWideSortField() throws Exception { + if (this instanceof NoOrdinalsStringFieldDataTests || this instanceof PagedBytesStringFieldDataTests) { + return; // Numeric types are not supported there. + } + // integer to long widening should happen + IndexFieldData indexFieldData = getForField("int", "value"); + SortField sortField = indexFieldData.wideSortField(null, MultiValueMode.MIN, null, false); + assertTrue(((SortedNumericSortField) sortField).getNumericType() == SortField.Type.LONG); + + // long to long no widening should happen + indexFieldData = getForField("long", "value"); + sortField = indexFieldData.wideSortField(null, MultiValueMode.MIN, null, false); + assertTrue(((SortedNumericSortField) sortField).getNumericType() == SortField.Type.LONG); + + // float to float no widening should happen + indexFieldData = getForField("float", "value"); + sortField = indexFieldData.wideSortField(null, MultiValueMode.MIN, null, false); + assertTrue(((SortedNumericSortField) sortField).getNumericType() == SortField.Type.FLOAT); + + } + protected abstract void fillSingleValueWithMissing() throws Exception; public void assertValues(SortedBinaryDocValues values, int docId, BytesRef... 
actualValues) throws IOException { From 70a582fcbe82e9b4250e29cbf7d6db470370906a Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Fri, 15 Sep 2023 15:38:42 -0700 Subject: [PATCH 09/23] Add async read support for S3 plugin (#9694) * Add async read support for S3 plugin Signed-off-by: Kunal Kotwani (cherry picked from commit 03ddc8a6b07221b1ed60ad5627939d1f957b1c49) Signed-off-by: Kunal Kotwani * Move functionality to S3BlobContainer Signed-off-by: Kunal Kotwani --------- Signed-off-by: Kunal Kotwani --- CHANGELOG.md | 1 + .../repositories/s3/S3BlobContainer.java | 109 +++++- .../repositories/s3/utils/HttpRangeUtils.java | 22 ++ .../s3/S3BlobStoreContainerTests.java | 310 +++++++++++++++++- .../s3/utils/HttpRangeUtilsTests.java | 29 ++ 5 files changed, 458 insertions(+), 13 deletions(-) create mode 100644 plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/utils/HttpRangeUtilsTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 633f005a0883d..337573480adfd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -78,6 +78,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added - Add metrics for thread_pool task wait time ([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681)) +- Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694)) ### Dependencies - Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index bb1643faecc95..2911a018df337 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -32,6 +32,8 @@ package org.opensearch.repositories.s3; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.services.s3.S3AsyncClient; @@ -44,10 +46,15 @@ import software.amazon.awssdk.services.s3.model.Delete; import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse; +import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest; +import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; import software.amazon.awssdk.services.s3.model.HeadObjectRequest; import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; import software.amazon.awssdk.services.s3.model.NoSuchKeyException; +import software.amazon.awssdk.services.s3.model.ObjectAttributes; import software.amazon.awssdk.services.s3.model.ObjectIdentifier; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.S3Error; @@ -63,6 +70,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; import org.opensearch.common.StreamContext; +import org.opensearch.common.annotation.ExperimentalApi; import 
org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; @@ -75,11 +83,13 @@ import org.opensearch.common.blobstore.support.AbstractBlobContainer; import org.opensearch.common.blobstore.support.PlainBlobMetadata; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.io.InputStreamContainer; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.repositories.s3.async.UploadRequest; +import org.opensearch.repositories.s3.utils.HttpRangeUtils; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -212,9 +222,45 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener comp } } + @ExperimentalApi @Override public void readBlobAsync(String blobName, ActionListener listener) { - throw new UnsupportedOperationException(); + try (AmazonAsyncS3Reference amazonS3Reference = SocketAccess.doPrivileged(blobStore::asyncClientReference)) { + final S3AsyncClient s3AsyncClient = amazonS3Reference.get().client(); + final String bucketName = blobStore.bucket(); + + final GetObjectAttributesResponse blobMetadata = getBlobMetadata(s3AsyncClient, bucketName, blobName).get(); + + final long blobSize = blobMetadata.objectSize(); + final int numberOfParts = blobMetadata.objectParts().totalPartsCount(); + final String blobChecksum = blobMetadata.checksum().checksumCRC32(); + + final List blobPartStreams = new ArrayList<>(); + final List> blobPartInputStreamFutures = new ArrayList<>(); + // S3 multipart files use 1 to n indexing + for (int partNumber = 1; partNumber <= numberOfParts; partNumber++) { + blobPartInputStreamFutures.add(getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, partNumber)); + } + + CompletableFuture.allOf(blobPartInputStreamFutures.toArray(CompletableFuture[]::new)).whenComplete((unused, throwable) -> { + if (throwable == null) { + listener.onResponse( + new ReadContext( + blobSize, + blobPartInputStreamFutures.stream().map(CompletableFuture::join).collect(Collectors.toList()), + blobChecksum + ) + ); + } else { + Exception ex = throwable.getCause() instanceof Exception + ? (Exception) throwable.getCause() + : new Exception(throwable.getCause()); + listener.onFailure(ex); + } + }); + } catch (Exception ex) { + listener.onFailure(SdkException.create("Error occurred while fetching blob parts from the repository", ex)); + } } public boolean remoteIntegrityCheckSupported() { @@ -633,4 +679,65 @@ static Tuple numberOfMultiparts(final long totalSize, final long par return Tuple.tuple(parts + 1, remaining); } } + + /** + * Fetches a part of the blob from the S3 bucket and transforms it to an {@link InputStreamContainer}, which holds + * the stream and its related metadata. + * @param s3AsyncClient Async client to be utilized to fetch the object part + * @param bucketName Name of the S3 bucket + * @param blobName Identifier of the blob for which the parts will be fetched + * @param partNumber Part number for the blob to be retrieved + * @return A future of {@link InputStreamContainer} containing the stream and stream metadata. 
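+ * Part numbers are 1-based, in line with S3 multipart indexing.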
+ */ + CompletableFuture getBlobPartInputStreamContainer( + S3AsyncClient s3AsyncClient, + String bucketName, + String blobName, + int partNumber + ) { + final GetObjectRequest.Builder getObjectRequestBuilder = GetObjectRequest.builder() + .bucket(bucketName) + .key(blobName) + .partNumber(partNumber); + + return SocketAccess.doPrivileged( + () -> s3AsyncClient.getObject(getObjectRequestBuilder.build(), AsyncResponseTransformer.toBlockingInputStream()) + .thenApply(S3BlobContainer::transformResponseToInputStreamContainer) + ); + } + + /** + * Transforms the stream response object from S3 into an {@link InputStreamContainer} + * @param streamResponse Response stream object from S3 + * @return {@link InputStreamContainer} containing the stream and stream metadata + */ + // Package-Private for testing. + static InputStreamContainer transformResponseToInputStreamContainer(ResponseInputStream streamResponse) { + final GetObjectResponse getObjectResponse = streamResponse.response(); + final String contentRange = getObjectResponse.contentRange(); + final Long contentLength = getObjectResponse.contentLength(); + if (contentRange == null || contentLength == null) { + throw SdkException.builder().message("Failed to fetch required metadata for blob part").build(); + } + final Long offset = HttpRangeUtils.getStartOffsetFromRangeHeader(getObjectResponse.contentRange()); + return new InputStreamContainer(streamResponse, getObjectResponse.contentLength(), offset); + } + + /** + * Retrieves the metadata like checksum, object size and parts for the provided blob within the S3 bucket. + * @param s3AsyncClient Async client to be utilized to fetch the metadata + * @param bucketName Name of the S3 bucket + * @param blobName Identifier of the blob for which the metadata will be fetched + * @return A future containing the metadata within {@link GetObjectAttributesResponse} + */ + CompletableFuture getBlobMetadata(S3AsyncClient s3AsyncClient, String bucketName, String blobName) { + // Fetch blob metadata - part info, size, checksum + final GetObjectAttributesRequest getObjectAttributesRequest = GetObjectAttributesRequest.builder() + .bucket(bucketName) + .key(blobName) + .objectAttributes(ObjectAttributes.CHECKSUM, ObjectAttributes.OBJECT_SIZE, ObjectAttributes.OBJECT_PARTS) + .build(); + + return SocketAccess.doPrivileged(() -> s3AsyncClient.getObjectAttributes(getObjectAttributesRequest)); + } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/utils/HttpRangeUtils.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/utils/HttpRangeUtils.java index 40aec7d52847b..2e2fc9b86a45b 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/utils/HttpRangeUtils.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/utils/HttpRangeUtils.java @@ -8,7 +8,29 @@ package org.opensearch.repositories.s3.utils; +import software.amazon.awssdk.core.exception.SdkException; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + public final class HttpRangeUtils { + private static final Pattern RANGE_PATTERN = Pattern.compile("^bytes\\s+(\\d+)-\\d+[/\\d*]+$"); + + /** + * Parses the content range header string value to calculate the start (offset) of the HTTP response. + * Tests against the RFC9110 specification of content range string. 
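+ * For example, a header value of "bytes 100-200/1000" yields a start offset of 100.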
+ * Sample values: "bytes 0-10/200", "bytes 0-10/*" + * Details: RFC 9110, "Content-Range" + * @param headerValue Header content range string value from the HTTP response + * @return Start (Offset) value of the HTTP response + */ + public static Long getStartOffsetFromRangeHeader(String headerValue) { + Matcher matcher = RANGE_PATTERN.matcher(headerValue); + if (!matcher.find()) { + throw SdkException.create("Regex match for Content-Range header {" + headerValue + "} failed", new RuntimeException()); + } + return Long.parseLong(matcher.group(1)); + } /** * Provides a byte range string per RFC 9110 diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index 1c4936cae7eba..a87c060dcc60a 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -32,11 +32,15 @@ package org.opensearch.repositories.s3; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.Checksum; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.CompletedPart; @@ -44,6 +48,11 @@ import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse; +import software.amazon.awssdk.services.s3.model.GetObjectAttributesParts; +import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest; +import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; import software.amazon.awssdk.services.s3.model.HeadObjectRequest; import software.amazon.awssdk.services.s3.model.HeadObjectResponse; import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; @@ -61,15 +70,18 @@ import software.amazon.awssdk.services.s3.model.UploadPartResponse; import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable; -import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.action.LatchedActionListener; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStoreException; import org.opensearch.common.blobstore.DeleteResult; +import org.opensearch.common.blobstore.stream.read.ReadContext; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.io.InputStreamContainer; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.unit.ByteSizeUnit; +import 
org.opensearch.repositories.s3.async.AsyncTransferManager; import org.opensearch.test.OpenSearchTestCase; import java.io.ByteArrayInputStream; @@ -86,14 +98,19 @@ import java.util.NoSuchElementException; import java.util.Set; import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.IntStream; import org.mockito.ArgumentCaptor; +import org.mockito.ArgumentMatchers; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -882,17 +899,6 @@ public void onFailure(Exception e) {} } } - public void testAsyncBlobDownload() { - final S3BlobStore blobStore = mock(S3BlobStore.class); - final BlobPath blobPath = mock(BlobPath.class); - final String blobName = "test-blob"; - - final UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, () -> { - final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - blobContainer.readBlobAsync(blobName, new PlainActionFuture<>()); - }); - } - public void testListBlobsByPrefixInLexicographicOrderWithNegativeLimit() throws IOException { testListBlobsByPrefixInLexicographicOrder(-5, 0, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); } @@ -912,4 +918,284 @@ public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanPageSiz public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanNumberOfRecords() throws IOException { testListBlobsByPrefixInLexicographicOrder(12, 2, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); } + + public void testReadBlobAsync() throws Exception { + final String bucketName = randomAlphaOfLengthBetween(1, 10); + final String blobName = randomAlphaOfLengthBetween(1, 10); + final String checksum = randomAlphaOfLength(10); + + final long objectSize = 100L; + final int objectPartCount = 10; + final int partSize = 10; + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( + AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) + ); + final AsyncTransferManager asyncTransferManager = new AsyncTransferManager( + 10000L, + mock(ExecutorService.class), + mock(ExecutorService.class) + ); + final S3BlobStore blobStore = mock(S3BlobStore.class); + final BlobPath blobPath = new BlobPath(); + + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.serverSideEncryption()).thenReturn(false); + when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference); + when(blobStore.getAsyncTransferManager()).thenReturn(asyncTransferManager); + + CompletableFuture getObjectAttributesResponseCompletableFuture = new CompletableFuture<>(); + getObjectAttributesResponseCompletableFuture.complete( + GetObjectAttributesResponse.builder() + .checksum(Checksum.builder().checksumCRC32(checksum).build()) + .objectSize(objectSize) + .objectParts(GetObjectAttributesParts.builder().totalPartsCount(objectPartCount).build()) + .build() + ); + when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenReturn( + 
getObjectAttributesResponseCompletableFuture + ); + + mockObjectPartResponse(s3AsyncClient, bucketName, blobName, objectPartCount, partSize, objectSize); + + CountDownLatch countDownLatch = new CountDownLatch(1); + CountingCompletionListener readContextActionListener = new CountingCompletionListener<>(); + LatchedActionListener listener = new LatchedActionListener<>(readContextActionListener, countDownLatch); + + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + blobContainer.readBlobAsync(blobName, listener); + countDownLatch.await(); + + assertEquals(1, readContextActionListener.getResponseCount()); + assertEquals(0, readContextActionListener.getFailureCount()); + ReadContext readContext = readContextActionListener.getResponse(); + assertEquals(objectPartCount, readContext.getNumberOfParts()); + assertEquals(checksum, readContext.getBlobChecksum()); + assertEquals(objectSize, readContext.getBlobSize()); + + for (int partNumber = 1; partNumber < objectPartCount; partNumber++) { + InputStreamContainer inputStreamContainer = readContext.getPartStreams().get(partNumber); + final int offset = partNumber * partSize; + assertEquals(partSize, inputStreamContainer.getContentLength()); + assertEquals(offset, inputStreamContainer.getOffset()); + assertEquals(partSize, inputStreamContainer.getInputStream().readAllBytes().length); + } + } + + public void testReadBlobAsyncFailure() throws Exception { + final String bucketName = randomAlphaOfLengthBetween(1, 10); + final String blobName = randomAlphaOfLengthBetween(1, 10); + final String checksum = randomAlphaOfLength(10); + + final long objectSize = 100L; + final int objectPartCount = 10; + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( + AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, null) + ); + final AsyncTransferManager asyncTransferManager = new AsyncTransferManager( + 10000L, + mock(ExecutorService.class), + mock(ExecutorService.class) + ); + final S3BlobStore blobStore = mock(S3BlobStore.class); + final BlobPath blobPath = new BlobPath(); + + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.serverSideEncryption()).thenReturn(false); + when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference); + when(blobStore.getAsyncTransferManager()).thenReturn(asyncTransferManager); + + CompletableFuture getObjectAttributesResponseCompletableFuture = new CompletableFuture<>(); + getObjectAttributesResponseCompletableFuture.complete( + GetObjectAttributesResponse.builder() + .checksum(Checksum.builder().checksumCRC32(checksum).build()) + .objectSize(objectSize) + .objectParts(GetObjectAttributesParts.builder().totalPartsCount(objectPartCount).build()) + .build() + ); + when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenThrow(new RuntimeException()); + + CountDownLatch countDownLatch = new CountDownLatch(1); + CountingCompletionListener readContextActionListener = new CountingCompletionListener<>(); + LatchedActionListener listener = new LatchedActionListener<>(readContextActionListener, countDownLatch); + + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + blobContainer.readBlobAsync(blobName, listener); + countDownLatch.await(); + + assertEquals(0, readContextActionListener.getResponseCount()); + assertEquals(1, 
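/* the stubbed getObjectAttributes failure should surface as exactly one onFailure call */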
readContextActionListener.getFailureCount()); + } + + public void testGetBlobMetadata() throws Exception { + final String checksum = randomAlphaOfLengthBetween(1, 10); + final long objectSize = 100L; + final int objectPartCount = 10; + final String blobName = randomAlphaOfLengthBetween(1, 10); + final String bucketName = randomAlphaOfLengthBetween(1, 10); + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final S3BlobStore blobStore = mock(S3BlobStore.class); + final BlobPath blobPath = new BlobPath(); + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.serverSideEncryption()).thenReturn(false); + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + + CompletableFuture getObjectAttributesResponseCompletableFuture = new CompletableFuture<>(); + getObjectAttributesResponseCompletableFuture.complete( + GetObjectAttributesResponse.builder() + .checksum(Checksum.builder().checksumCRC32(checksum).build()) + .objectSize(objectSize) + .objectParts(GetObjectAttributesParts.builder().totalPartsCount(objectPartCount).build()) + .build() + ); + when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenReturn( + getObjectAttributesResponseCompletableFuture + ); + + CompletableFuture responseFuture = blobContainer.getBlobMetadata(s3AsyncClient, bucketName, blobName); + GetObjectAttributesResponse objectAttributesResponse = responseFuture.get(); + + assertEquals(checksum, objectAttributesResponse.checksum().checksumCRC32()); + assertEquals(Long.valueOf(objectSize), objectAttributesResponse.objectSize()); + assertEquals(Integer.valueOf(objectPartCount), objectAttributesResponse.objectParts().totalPartsCount()); + } + + public void testGetBlobPartInputStream() throws Exception { + final String blobName = randomAlphaOfLengthBetween(1, 10); + final String bucketName = randomAlphaOfLengthBetween(1, 10); + final long contentLength = 10L; + final String contentRange = "bytes 0-10/100"; + final InputStream inputStream = ResponseInputStream.nullInputStream(); + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final S3BlobStore blobStore = mock(S3BlobStore.class); + final BlobPath blobPath = new BlobPath(); + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.serverSideEncryption()).thenReturn(false); + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + + GetObjectResponse getObjectResponse = GetObjectResponse.builder().contentLength(contentLength).contentRange(contentRange).build(); + + CompletableFuture> getObjectPartResponse = new CompletableFuture<>(); + ResponseInputStream responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream); + getObjectPartResponse.complete(responseInputStream); + + when( + s3AsyncClient.getObject( + any(GetObjectRequest.class), + ArgumentMatchers.>>any() + ) + ).thenReturn(getObjectPartResponse); + + InputStreamContainer inputStreamContainer = blobContainer.getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, 0) + .get(); + + assertEquals(0, inputStreamContainer.getOffset()); + assertEquals(contentLength, inputStreamContainer.getContentLength()); + assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available()); + } + + public void testTransformResponseToInputStreamContainer() throws Exception { + final String 
contentRange = "bytes 0-10/100"; + final long contentLength = 10L; + final InputStream inputStream = ResponseInputStream.nullInputStream(); + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + + GetObjectResponse getObjectResponse = GetObjectResponse.builder().contentLength(contentLength).build(); + + ResponseInputStream responseInputStreamNoRange = new ResponseInputStream<>(getObjectResponse, inputStream); + assertThrows(SdkException.class, () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoRange)); + + getObjectResponse = GetObjectResponse.builder().contentRange(contentRange).build(); + ResponseInputStream responseInputStreamNoContentLength = new ResponseInputStream<>( + getObjectResponse, + inputStream + ); + assertThrows(SdkException.class, () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoContentLength)); + + getObjectResponse = GetObjectResponse.builder().contentRange(contentRange).contentLength(contentLength).build(); + ResponseInputStream responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream); + InputStreamContainer inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer(responseInputStream); + assertEquals(contentLength, inputStreamContainer.getContentLength()); + assertEquals(0, inputStreamContainer.getOffset()); + assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available()); + } + + private void mockObjectPartResponse( + S3AsyncClient s3AsyncClient, + String bucketName, + String blobName, + int totalNumberOfParts, + int partSize, + long objectSize + ) { + for (int partNumber = 1; partNumber <= totalNumberOfParts; partNumber++) { + final int start = (partNumber - 1) * partSize; + final int end = partNumber * partSize; + final String contentRange = "bytes " + start + "-" + end + "/" + objectSize; + final InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(partSize)); + + GetObjectResponse getObjectResponse = GetObjectResponse.builder() + .contentLength((long) partSize) + .contentRange(contentRange) + .build(); + + CompletableFuture> getObjectPartResponse = new CompletableFuture<>(); + ResponseInputStream responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream); + getObjectPartResponse.complete(responseInputStream); + + GetObjectRequest getObjectRequest = GetObjectRequest.builder().bucket(bucketName).key(blobName).partNumber(partNumber).build(); + + when( + s3AsyncClient.getObject( + eq(getObjectRequest), + ArgumentMatchers.>>any() + ) + ).thenReturn(getObjectPartResponse); + } + } + + private static class CountingCompletionListener implements ActionListener { + private int responseCount; + private int failureCount; + private T response; + private Exception exception; + + @Override + public void onResponse(T response) { + this.response = response; + responseCount++; + } + + @Override + public void onFailure(Exception e) { + exception = e; + failureCount++; + } + + public int getResponseCount() { + return responseCount; + } + + public int getFailureCount() { + return failureCount; + } + + public T getResponse() { + return response; + } + + public Exception getException() { + return exception; + } + } } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/utils/HttpRangeUtilsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/utils/HttpRangeUtilsTests.java new file mode 100644 index 0000000000000..9a4267c5266e5 --- /dev/null +++ 
b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/utils/HttpRangeUtilsTests.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.repositories.s3.utils; + +import software.amazon.awssdk.core.exception.SdkException; + +import org.opensearch.test.OpenSearchTestCase; + +public final class HttpRangeUtilsTests extends OpenSearchTestCase { + + public void testFromHttpRangeHeader() { + String headerValue = "bytes 0-10/200"; + Long offset = HttpRangeUtils.getStartOffsetFromRangeHeader(headerValue); + assertEquals(0L, offset.longValue()); + + headerValue = "bytes 0-10/*"; + offset = HttpRangeUtils.getStartOffsetFromRangeHeader(headerValue); + assertEquals(0L, offset.longValue()); + + final String invalidHeaderValue = "bytes */*"; + assertThrows(SdkException.class, () -> HttpRangeUtils.getStartOffsetFromRangeHeader(invalidHeaderValue)); + } } From 5fdd418be5717cc91023b071075fb1ab525ba8e4 Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Sat, 16 Sep 2023 12:59:56 +0530 Subject: [PATCH 10/23] skip overriding routing table when it already contains entries with remote recovery source (#9962) * skip overriding routing table when it already contains entries with remote recovery source Signed-off-by: bansvaru --- .../gateway/ClusterStateUpdaters.java | 17 +- .../recovery/RemoteStoreRestoreService.java | 2 +- .../gateway/ClusterStateUpdatersTests.java | 320 ++++++++++++++++++ 3 files changed, 337 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/gateway/ClusterStateUpdaters.java b/server/src/main/java/org/opensearch/gateway/ClusterStateUpdaters.java index 1563ac84bdd1c..4c562b348f141 100644 --- a/server/src/main/java/org/opensearch/gateway/ClusterStateUpdaters.java +++ b/server/src/main/java/org/opensearch/gateway/ClusterStateUpdaters.java @@ -41,6 +41,7 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.common.settings.ClusterSettings; @@ -120,7 +121,21 @@ static ClusterState updateRoutingTable(final ClusterState state) { // initialize all index routing tables as empty final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(state.routingTable()); for (final IndexMetadata cursor : state.metadata().indices().values()) { - routingTableBuilder.addAsRecovery(cursor); + // Whether IndexMetadata is recovered from local disk or from remote does not matter to us at this point. + // We are only concerned with index data recovery here, which is why we only check for remote store enabled and not for remote + // cluster state enabled. + if (cursor.getSettings().getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false) == false + || state.routingTable().hasIndex(cursor.getIndex()) == false + || state.routingTable() + .index(cursor.getIndex()) + .shardsMatchingPredicateCount( + shardRouting -> shardRouting.primary() + // We need to ensure at least one of the primaries is being recovered from remote. 
+ // This ensures we have gone through the RemoteStoreRestoreService and routing table is updated + && shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource + ) == 0) { + routingTableBuilder.addAsRecovery(cursor); + } } // start with 0 based versions for routing table routingTableBuilder.version(0); diff --git a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java index d05242a3aeaf7..94fd08b99ac58 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java +++ b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java @@ -187,7 +187,7 @@ private RemoteRestoreResult executeRestore( IndexMetadata indexMetadata = indexMetadataEntry.getValue().v2(); boolean metadataFromRemoteStore = indexMetadataEntry.getValue().v1(); IndexMetadata updatedIndexMetadata = indexMetadata; - if (restoreAllShards || metadataFromRemoteStore) { + if (metadataFromRemoteStore == false && restoreAllShards) { updatedIndexMetadata = IndexMetadata.builder(indexMetadata) .state(IndexMetadata.State.OPEN) .version(1 + indexMetadata.getVersion()) diff --git a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java index c83da46b23fb1..9b3fd45245ef7 100644 --- a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java +++ b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java @@ -40,6 +40,10 @@ import org.opensearch.cluster.metadata.MetadataIndexStateService; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.RecoverySource; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.ClusterSettings; @@ -48,10 +52,12 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.set.Sets; import org.opensearch.core.index.Index; +import org.opensearch.repositories.IndexId; import org.opensearch.test.OpenSearchTestCase; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.Set; import java.util.function.BiConsumer; import java.util.function.Function; @@ -269,6 +275,320 @@ public void testUpdateRoutingTable() { } } + public void testSkipRoutingTableUpdateWhenRemoteRecovery() { + final int numOfShards = randomIntBetween(1, 10); + + final IndexMetadata remoteMetadata = createIndexMetadata( + "test-remote", + Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .build() + ); + + // Test remote index routing table is generated with ExistingStoreRecoverySource if no routing table is present + { + final Index index = remoteMetadata.getIndex(); + final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metadata(Metadata.builder().put(remoteMetadata, false).build()) + .build(); + final ClusterState newState = updateRoutingTable(initialState); + IndexRoutingTable newRemoteIndexRoutingTable = newState.routingTable().index(remoteMetadata.getIndex()); + assertTrue(newState.routingTable().hasIndex(index)); + 
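// with no remote recovery source on any primary, the routing table is rebuilt from scratch: +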
assertEquals( + 0, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.INDEX_CREATED) + ) + ); + assertEquals( + numOfShards, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) + ) + ); + assertEquals( + 0, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource + ) + ); + assertEquals( + numOfShards, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource + ) + ); + + } + + // Test remote index routing table is overridden if recovery source is not RemoteStoreRecoverySource + { + IndexRoutingTable.Builder remoteBuilderWithoutRemoteRecovery = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) + .initializeAsNew(remoteMetadata); + final Index index = remoteMetadata.getIndex(); + final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metadata(Metadata.builder().put(remoteMetadata, false).build()) + .routingTable(new RoutingTable.Builder().add(remoteBuilderWithoutRemoteRecovery.build()).build()) + .build(); + assertTrue(initialState.routingTable().hasIndex(index)); + final ClusterState newState = updateRoutingTable(initialState); + IndexRoutingTable newRemoteIndexRoutingTable = newState.routingTable().index(remoteMetadata.getIndex()); + assertTrue(newState.routingTable().hasIndex(index)); + assertEquals( + 0, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.INDEX_CREATED) + ) + ); + assertEquals( + numOfShards, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) + ) + ); + assertEquals( + 0, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource + ) + ); + assertEquals( + numOfShards, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource + ) + ); + + } + + // Test routing table update is skipped for a remote index + { + IndexRoutingTable.Builder remoteBuilderWithRemoteRecovery = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) + .initializeAsRemoteStoreRestore( + remoteMetadata, + new RecoverySource.RemoteStoreRecoverySource( + UUIDs.randomBase64UUID(), + remoteMetadata.getCreationVersion(), + new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID()) + ), + new HashMap<>(), + true + ); + final Index index = remoteMetadata.getIndex(); + final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metadata(Metadata.builder().put(remoteMetadata, false).build()) + .routingTable(new RoutingTable.Builder().add(remoteBuilderWithRemoteRecovery.build()).build()) + .build(); + assertTrue(initialState.routingTable().hasIndex(index)); + final ClusterState newState = updateRoutingTable(initialState); + IndexRoutingTable newRemoteIndexRoutingTable = newState.routingTable().index(remoteMetadata.getIndex()); + assertTrue(newState.routingTable().hasIndex(index)); + assertEquals( + 0, + 
newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) + ) + ); + assertEquals( + numOfShards, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED) + ) + ); + assertEquals( + 0, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource + ) + ); + assertEquals( + numOfShards, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource + ) + ); + + } + + // Test reset routing table for 2 indices - one remote and one non remote. + // Routing table for non remote index should be updated and remote index routing table should remain intact + { + final IndexMetadata nonRemoteMetadata = createIndexMetadata( + "test-nonremote", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards).build() + ); + IndexRoutingTable.Builder remoteBuilderWithRemoteRecovery = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) + .initializeAsRemoteStoreRestore( + remoteMetadata, + new RecoverySource.RemoteStoreRecoverySource( + UUIDs.randomBase64UUID(), + remoteMetadata.getCreationVersion(), + new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID()) + ), + new HashMap<>(), + true + ); + IndexRoutingTable.Builder nonRemoteBuilderWithoutRemoteRecovery = new IndexRoutingTable.Builder(nonRemoteMetadata.getIndex()) + .initializeAsNew(nonRemoteMetadata); + final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metadata(Metadata.builder().put(remoteMetadata, false).build()) + .metadata(Metadata.builder().put(nonRemoteMetadata, false).build()) + .routingTable( + new RoutingTable.Builder().add(remoteBuilderWithRemoteRecovery.build()) + .add(nonRemoteBuilderWithoutRemoteRecovery.build()) + .build() + ) + .build(); + assertTrue(initialState.routingTable().hasIndex(remoteMetadata.getIndex())); + assertTrue(initialState.routingTable().hasIndex(nonRemoteMetadata.getIndex())); + final ClusterState newState = updateRoutingTable(initialState); + assertTrue(newState.routingTable().hasIndex(remoteMetadata.getIndex())); + assertTrue(newState.routingTable().hasIndex(nonRemoteMetadata.getIndex())); + IndexRoutingTable newRemoteIndexRoutingTable = newState.routingTable().index(remoteMetadata.getIndex()); + IndexRoutingTable newNonRemoteIndexRoutingTable = newState.routingTable().index(nonRemoteMetadata.getIndex()); + assertEquals( + 0, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) + ) + ); + assertEquals( + numOfShards, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED) + ) + ); + assertEquals( + 0, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource + ) + ); + assertEquals( + numOfShards, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource + ) + ); + assertEquals( + 0, + 
newNonRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.INDEX_CREATED) + ) + ); + assertEquals( + numOfShards, + newNonRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) + ) + ); + assertEquals( + 0, + newNonRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource + ) + ); + assertEquals( + numOfShards, + newNonRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource + ) + ); + } + + // Test reset routing table for 2 indices, both remote-backed but only one index has RemoteStoreRecoverySource. + // Only the routing table for the remote index without RemoteStoreRecoverySource should be updated + { + final IndexMetadata remoteWithoutRemoteRecoveryMetadata = createIndexMetadata( + "test-remote-without-recovery", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .build() + ); + IndexRoutingTable.Builder remoteBuilderWithRemoteRecovery = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) + .initializeAsRemoteStoreRestore( + remoteMetadata, + new RecoverySource.RemoteStoreRecoverySource( + UUIDs.randomBase64UUID(), + remoteMetadata.getCreationVersion(), + new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID()) + ), + new HashMap<>(), + true + ); + IndexRoutingTable.Builder remoteBuilderWithoutRemoteRecovery = new IndexRoutingTable.Builder( + remoteWithoutRemoteRecoveryMetadata.getIndex() + ).initializeAsNew(remoteWithoutRemoteRecoveryMetadata); + final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metadata(Metadata.builder().put(remoteMetadata, false).build()) + .metadata(Metadata.builder().put(remoteWithoutRemoteRecoveryMetadata, false).build()) + .routingTable( + new RoutingTable.Builder().add(remoteBuilderWithRemoteRecovery.build()) + .add(remoteBuilderWithoutRemoteRecovery.build()) + .build() + ) + .build(); + assertTrue(initialState.routingTable().hasIndex(remoteMetadata.getIndex())); + assertTrue(initialState.routingTable().hasIndex(remoteWithoutRemoteRecoveryMetadata.getIndex())); + final ClusterState newState = updateRoutingTable(initialState); + assertTrue(newState.routingTable().hasIndex(remoteMetadata.getIndex())); + assertTrue(newState.routingTable().hasIndex(remoteWithoutRemoteRecoveryMetadata.getIndex())); + IndexRoutingTable newRemoteIndexRoutingTable = newState.routingTable().index(remoteMetadata.getIndex()); + IndexRoutingTable newRemoteWithoutRemoteRecoveryIndexRoutingTable = newState.routingTable() + .index(remoteWithoutRemoteRecoveryMetadata.getIndex()); + assertEquals( + 0, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) + ) + ); + assertEquals( + numOfShards, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED) + ) + ); + assertEquals( + 0, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource + ) + ); + 
assertEquals( + numOfShards, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource + ) + ); + assertEquals( + 0, + newRemoteWithoutRemoteRecoveryIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.INDEX_CREATED) + ) + ); + assertEquals( + numOfShards, + newRemoteWithoutRemoteRecoveryIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) + ) + ); + assertEquals( + 0, + newRemoteWithoutRemoteRecoveryIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource + ) + ); + assertEquals( + numOfShards, + newRemoteWithoutRemoteRecoveryIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource + ) + ); + } + } + public void testMixCurrentAndRecoveredState() { final ClusterState currentState = ClusterState.builder(ClusterState.EMPTY_STATE) .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK).build()) From d7aa6dd9c7759bbfedd7263d453d3ac6c92fab7d Mon Sep 17 00:00:00 2001 From: Chaitanya Gohel <104654647+gashutos@users.noreply.github.com> Date: Sat, 16 Sep 2023 20:49:23 +0530 Subject: [PATCH 11/23] Fixing broken UT (#10087) Signed-off-by: Gohel Co-authored-by: Gohel --- server/src/main/java/org/opensearch/index/IndexService.java | 2 ++ .../src/main/java/org/opensearch/index/IndexSortConfig.java | 5 ++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 80ead0a333ba3..38456ad3e43a5 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -240,8 +240,10 @@ public IndexService( if (indexSettings.getIndexSortConfig().hasIndexSort()) { // we delay the actual creation of the sort order for this index because the mapping has not been merged yet. // The sort order is validated right after the merge of the mapping later in the process. 
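For context on the added lines that follow: the fix reads the widening flag once, outside the lazy sort supplier, and passes it into buildIndexSort as a parameter; the corresponding field on IndexSortConfig is deleted further down in this patch. A small self-contained sketch of that capture-then-supply shape (IndexSortDemo and the String return type are stand-ins, purely for illustration):

import java.util.function.Supplier;

final class IndexSortDemo {

    // stand-in for IndexSortConfig.buildIndexSort(shouldWidenIndexSortType, ...)
    static String buildIndexSort(boolean shouldWidenIndexSortType) {
        return shouldWidenIndexSortType ? "wideSortField" : "sortField";
    }

    public static void main(String[] args) {
        // capture the setting eagerly, before the supplier is built
        final boolean shouldWidenIndexSortType = true; // stand-in for indexSettings.shouldWidenIndexSortType()
        final Supplier<String> indexSortSupplier = () -> buildIndexSort(shouldWidenIndexSortType);
        System.out.println(indexSortSupplier.get()); // prints: wideSortField
    }
}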
+ boolean shouldWidenIndexSortType = this.indexSettings.shouldWidenIndexSortType(); this.indexSortSupplier = () -> indexSettings.getIndexSortConfig() .buildIndexSort( + shouldWidenIndexSortType, mapperService::fieldType, (fieldType, searchLookup) -> indexFieldData.getForField(fieldType, indexFieldData.index().getName(), searchLookup) ); diff --git a/server/src/main/java/org/opensearch/index/IndexSortConfig.java b/server/src/main/java/org/opensearch/index/IndexSortConfig.java index 83192052564f3..763ed34a1e6a6 100644 --- a/server/src/main/java/org/opensearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/opensearch/index/IndexSortConfig.java @@ -143,7 +143,6 @@ private static MultiValueMode parseMultiValueMode(String value) { // visible for tests final FieldSortSpec[] sortSpecs; - final boolean shouldWidenIndexSortType; public IndexSortConfig(IndexSettings indexSettings) { final Settings settings = indexSettings.getSettings(); @@ -183,7 +182,6 @@ public IndexSortConfig(IndexSettings indexSettings) { sortSpecs[i].missingValue = missingValues.get(i); } } - this.shouldWidenIndexSortType = indexSettings.shouldWidenIndexSortType(); } /** @@ -202,6 +200,7 @@ public boolean hasPrimarySortOnField(String field) { * or returns null if this index has no sort. */ public Sort buildIndexSort( + boolean shouldWidenIndexSortType, Function fieldTypeLookup, BiFunction, IndexFieldData> fieldDataLookup ) { @@ -232,7 +231,7 @@ public Sort buildIndexSort( if (fieldData == null) { throw new IllegalArgumentException("docvalues not found for index sort field:[" + sortSpec.field + "]"); } - if (this.shouldWidenIndexSortType == true) { + if (shouldWidenIndexSortType == true) { sortFields[i] = fieldData.wideSortField(sortSpec.missingValue, mode, null, reverse); } else { sortFields[i] = fieldData.sortField(sortSpec.missingValue, mode, null, reverse); From b5cc00222816429ed673d9fb46c434cae1be862b Mon Sep 17 00:00:00 2001 From: Ashish Date: Mon, 18 Sep 2023 21:37:41 +0530 Subject: [PATCH 12/23] Optimize read write lock constructs during translog upload to remote store (#9636) Signed-off-by: Ashish Singh --- .../index/translog/RemoteFsTranslog.java | 85 +++++++++++++------ .../TranslogCheckpointTransferSnapshot.java | 1 - .../transfer/TranslogTransferManager.java | 2 +- .../listener/TranslogTransferListener.java | 2 +- .../index/translog/RemoteFsTranslogTests.java | 1 - .../TranslogTransferManagerTests.java | 3 + 6 files changed, 63 insertions(+), 31 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index c488127857ed5..a633cb0787f27 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -60,6 +60,7 @@ public class RemoteFsTranslog extends Translog { private final BooleanSupplier primaryModeSupplier; private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; private volatile long maxRemoteTranslogGenerationUploaded; + private final Object uploadMutex = new Object(); private volatile long minSeqNoToKeep; @@ -237,11 +238,20 @@ public static TranslogTransferManager buildTranslogTransferManager( @Override public boolean ensureSynced(Location location) throws IOException { - try (ReleasableLock ignored = writeLock.acquire()) { - assert location.generation <= current.getGeneration(); - if (location.generation == current.getGeneration()) { - ensureOpen(); - return 
prepareAndUpload(primaryTermSupplier.getAsLong(), location.generation); + try { + boolean shouldUpload = false; + try (ReleasableLock ignored = writeLock.acquire()) { + assert location.generation <= current.getGeneration(); + if (location.generation == current.getGeneration()) { + ensureOpen(); + if (prepareForUpload(location.generation) == false) { + return false; + } + shouldUpload = true; + } + } + if (shouldUpload) { + return performUpload(primaryTermSupplier.getAsLong(), location.generation); } } catch (final Exception ex) { closeOnTragicEvent(ex); @@ -256,10 +266,12 @@ public void rollGeneration() throws IOException { if (current.totalOperations() == 0 && primaryTermSupplier.getAsLong() == current.getPrimaryTerm()) { return; } - prepareAndUpload(primaryTermSupplier.getAsLong(), null); + if (prepareForUpload(null)) { + performUpload(primaryTermSupplier.getAsLong(), null); + } } - private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOException { + private boolean prepareForUpload(Long generation) throws IOException { try (Releasable ignored = writeLock.acquire()) { if (generation == null || generation == current.getGeneration()) { try { @@ -275,23 +287,41 @@ private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOExc closeOnTragicEvent(e); throw e; } - } else if (generation < current.getGeneration()) { - return false; - } + return true; + } else return generation >= current.getGeneration(); + } + } - // Do we need remote writes in sync fashion? - // If we don't, we should swallow FileAlreadyExistsException while writing to remote store - // and also verify for same during primary-primary relocation - // Writing remote in sync fashion doesn't hurt as global ckp update - // is not updated in remote translog except in primary to primary recovery. - if (generation == null) { - if (closed.get() == false) { - return upload(primaryTerm, current.getGeneration() - 1); + /** + * This method does the remote store upload by first acquiring the lock on the uploadMutex monitor. The synchronized block + * is required to prevent multiple uploads from happening concurrently. The read lock is required to ensure that the + * underlying translog readers are not deleted and the current writer is not converted to a reader at the time of + * upload. + * + * @param primaryTerm current primary term + * @param generation current generation + * @return true if upload is successful + * @throws IOException if the upload fails due to any underlying exceptions. + */ + private boolean performUpload(Long primaryTerm, Long generation) throws IOException { + synchronized (uploadMutex) { + try (Releasable ignored = readLock.acquire()) { + // Do we need remote writes in sync fashion? + // If we don't, we should swallow FileAlreadyExistsException while writing to remote store + // and also verify for same during primary-primary relocation + // Writing remote in sync fashion doesn't hurt as global ckp update + // is not updated in remote translog except in primary to primary recovery. 
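The javadoc above fixes the lock order: uploads are serialized on uploadMutex, and the translog read lock is held for the whole transfer so readers are not trimmed and the current writer is not rolled mid-upload. A self-contained sketch of that shape, assuming plain JDK locks in place of the translog's ReleasableLock (UploadGate and doUpload are illustrative names only):

import java.util.concurrent.locks.ReentrantReadWriteLock;

final class UploadGate {
    private final Object uploadMutex = new Object();
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    boolean upload(Runnable doUpload) {
        synchronized (uploadMutex) {    // at most one remote upload at a time
            lock.readLock().lock();     // writer-side mutations wait until the upload finishes
            try {
                doUpload.run();         // the actual remote transfer
                return true;
            } finally {
                lock.readLock().unlock();
            }
        }
    }
}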
+ long generationToUpload; + if (generation == null) { + if (closed.get() == false) { + generationToUpload = current.getGeneration() - 1; + } else { + generationToUpload = current.getGeneration(); + } } else { - return upload(primaryTerm, current.getGeneration()); + generationToUpload = generation; } - } else { - return upload(primaryTerm, generation); + return upload(primaryTerm, generationToUpload); } } } @@ -343,8 +373,8 @@ private boolean syncToDisk() throws IOException { @Override public void sync() throws IOException { try { - if (syncToDisk() || syncNeeded()) { - prepareAndUpload(primaryTermSupplier.getAsLong(), null); + if ((syncToDisk() || syncNeeded()) && prepareForUpload(null)) { + performUpload(primaryTermSupplier.getAsLong(), null); } } catch (final Exception e) { tragedy.setTragicException(e); @@ -528,8 +558,6 @@ private class RemoteFsTranslogTransferListener implements TranslogTransferListen @Override public void onUploadComplete(TransferSnapshot transferSnapshot) throws IOException { - transferReleasable.close(); - closeFilesIfNoPendingRetentionLocks(); maxRemoteTranslogGenerationUploaded = generation; minRemoteGenReferenced = getMinFileGeneration(); logger.trace("uploaded translog for {} {} ", primaryTerm, generation); @@ -537,13 +565,16 @@ public void onUploadComplete(TransferSnapshot transferSnapshot) throws IOExcepti @Override public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) throws IOException { - transferReleasable.close(); - closeFilesIfNoPendingRetentionLocks(); if (ex instanceof IOException) { throw (IOException) ex; } else { throw (RuntimeException) ex; } } + + @Override + public void close() { + transferReleasable.close(); + } } } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java index 10dec13c81e1a..eb0eebb564b63 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java @@ -164,7 +164,6 @@ public TranslogCheckpointTransferSnapshot build() throws IOException { translogTransferSnapshot.setMinTranslogGeneration(highestGenMinTranslogGeneration); assert this.primaryTerm == highestGenPrimaryTerm : "inconsistent primary term"; - assert this.generation == highestGeneration : " inconsistent generation "; final long finalHighestGeneration = highestGeneration; assert LongStream.iterate(lowestGeneration, i -> i + 1) .limit(highestGeneration) diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index fd4936603671c..fe6b5dab9937b 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -109,7 +109,7 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans long prevUploadBytesSucceeded = remoteTranslogTransferTracker.getUploadBytesSucceeded(); long prevUploadTimeInMillis = remoteTranslogTransferTracker.getTotalUploadTimeInMillis(); - try { + try (translogTransferListener) { toUpload.addAll(fileTransferTracker.exclusionFilter(transferSnapshot.getTranslogFileSnapshots())); 
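The try-with-resources header just above compiles because TranslogTransferListener is retrofitted to extend AutoCloseable later in this patch; close() then runs on success and failure alike, replacing the transferReleasable.close() calls removed from the individual callbacks. A minimal self-contained sketch of the pattern (TransferDemo and its members are illustrative names):

final class TransferDemo {

    interface TransferListener extends AutoCloseable {
        void onComplete();

        @Override
        void close(); // override narrowed so no checked exception escapes the try header
    }

    static void transfer(TransferListener listener) {
        try (listener) {           // Java 9+: an effectively final variable can be the resource
            listener.onComplete();
        }                          // listener.close() always runs here
    }

    public static void main(String[] args) {
        transfer(new TransferListener() {
            @Override
            public void onComplete() { System.out.println("uploaded"); }

            @Override
            public void close() { System.out.println("resources released"); }
        });
    }
}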
toUpload.addAll(fileTransferTracker.exclusionFilter((transferSnapshot.getCheckpointFileSnapshots()))); if (toUpload.isEmpty()) { diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java b/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java index 132d1adf916da..8805c16298d96 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java @@ -17,7 +17,7 @@ * * @opensearch.internal */ -public interface TranslogTransferListener { +public interface TranslogTransferListener extends AutoCloseable { /** * Invoked when the transfer of {@link TransferSnapshot} succeeds * @param transferSnapshot the transfer snapshot diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index de1b2990f0a50..3c654818ffc6c 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -108,7 +108,6 @@ import static org.mockito.Mockito.when; @LuceneTestCase.SuppressFileSystems("ExtrasFS") - public class RemoteFsTranslogTests extends OpenSearchTestCase { protected final ShardId shardId = new ShardId("index", "_na_", 1); diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index 6fc4557a75675..0987201eb8602 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -168,6 +168,9 @@ public void onUploadComplete(TransferSnapshot transferSnapshot) { public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { translogTransferFailed.incrementAndGet(); } + + @Override + public void close() {} })); assertEquals(4, fileTransferSucceeded.get()); assertEquals(0, fileTransferFailed.get()); From 5b864c0e023647cf653300d638f60fc0136bf95b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 14:40:19 -0400 Subject: [PATCH 13/23] Bump com.zaxxer:SparseBitSet from 1.2 to 1.3 in /plugins/ingest-attachment (#10098) * Bump com.zaxxer:SparseBitSet in /plugins/ingest-attachment Bumps [com.zaxxer:SparseBitSet](https://github.com/brettwooldridge/SparseBitSet) from 1.2 to 1.3. - [Commits](https://github.com/brettwooldridge/SparseBitSet/compare/SparseBitSet-1.2...SparseBitSet-1.3) --- updated-dependencies: - dependency-name: com.zaxxer:SparseBitSet dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 3 ++- plugins/ingest-attachment/build.gradle | 2 +- plugins/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 | 1 - plugins/ingest-attachment/licenses/SparseBitSet-1.3.jar.sha1 | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) delete mode 100644 plugins/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/SparseBitSet-1.3.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 337573480adfd..d374a725c84ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -88,6 +88,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.cloud:google-cloud-core-http` from 2.21.1 to 2.23.0 ([#9971](https://github.com/opensearch-project/OpenSearch/pull/9971)) - Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) - Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) +- Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098)) ### Changed - Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) @@ -106,4 +107,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x \ No newline at end of file diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index f54c80e9f76c1..330a17c02bc7a 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -93,7 +93,7 @@ dependencies { // Microsoft Word files with visio diagrams api 'org.apache.commons:commons-math3:3.6.1' // POIs dependency - api 'com.zaxxer:SparseBitSet:1.2' + api 'com.zaxxer:SparseBitSet:1.3' } restResources { diff --git a/plugins/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 b/plugins/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 deleted file mode 100644 index 5f1d015b87ac7..0000000000000 --- a/plugins/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8467c813d442837fcaeddbc42cf5c5359fab4933 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/SparseBitSet-1.3.jar.sha1 b/plugins/ingest-attachment/licenses/SparseBitSet-1.3.jar.sha1 new file mode 100644 index 0000000000000..2803db7c91e30 --- /dev/null +++ b/plugins/ingest-attachment/licenses/SparseBitSet-1.3.jar.sha1 @@ -0,0 +1 @@ +533eac055afe3d5f614ea95e333afd6c2bde8f26 \ No newline at end of file From 2a5b124ee8ef4376d62c484b6cd3ea1d98ca75d1 Mon Sep 17 00:00:00 2001 From: Sooraj Sinha <81695996+soosinha@users.noreply.github.com> Date: Tue, 19 Sep 2023 09:11:20 +0530 Subject: [PATCH 14/23] Fix cluster chaining during bootstrap (#10020) * Fix clusterUUID chaining logic Signed-off-by: Sooraj Sinha --- .../opensearch/gateway/GatewayMetaState.java | 43 ++++++- .../remote/ClusterMetadataManifest.java | 37 +++++- 
.../remote/RemoteClusterStateService.java | 100 ++++++++++++--- .../coordination/CoordinationStateTests.java | 36 +++++- .../GatewayMetaStatePersistedStateTests.java | 21 +++- .../remote/ClusterMetadataManifestTests.java | 21 +++- .../RemoteClusterStateServiceTests.java | 117 +++++++++++++++--- 7 files changed, 315 insertions(+), 60 deletions(-) diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index e42ac8daa3b1c..6b26af148b2ea 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -78,6 +78,7 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; @@ -171,11 +172,12 @@ public void start( // If the cluster UUID loaded from local is unknown (_na_) then fetch the best state from remote // If there is no valid state on remote, continue with initial empty state // If there is a valid state, then restore index metadata using this state + String lastKnownClusterUUID = ClusterState.UNKNOWN_UUID; if (ClusterState.UNKNOWN_UUID.equals(clusterState.metadata().clusterUUID())) { - String lastKnownClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote( + lastKnownClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote( clusterState.getClusterName().value() ); - if (!ClusterState.UNKNOWN_UUID.equals(lastKnownClusterUUID)) { + if (ClusterState.UNKNOWN_UUID.equals(lastKnownClusterUUID) == false) { // Load state from remote final RemoteRestoreResult remoteRestoreResult = remoteStoreRestoreService.restore( clusterState, @@ -186,7 +188,7 @@ public void start( clusterState = remoteRestoreResult.getClusterState(); } } - remotePersistedState = new RemotePersistedState(remoteClusterStateService); + remotePersistedState = new RemotePersistedState(remoteClusterStateService, lastKnownClusterUUID); } persistedState = new LucenePersistedState(persistedClusterStateService, currentTerm, clusterState); } else { @@ -647,9 +649,11 @@ public static class RemotePersistedState implements PersistedState { private ClusterState lastAcceptedState; private ClusterMetadataManifest lastAcceptedManifest; private final RemoteClusterStateService remoteClusterStateService; + private String previousClusterUUID; - public RemotePersistedState(final RemoteClusterStateService remoteClusterStateService) { + public RemotePersistedState(final RemoteClusterStateService remoteClusterStateService, final String previousClusterUUID) { this.remoteClusterStateService = remoteClusterStateService; + this.previousClusterUUID = previousClusterUUID; } @Override @@ -674,7 +678,26 @@ public void setLastAcceptedState(ClusterState clusterState) { try { final ClusterMetadataManifest manifest; if (shouldWriteFullClusterState(clusterState)) { - manifest = remoteClusterStateService.writeFullMetadata(clusterState); + if (clusterState.metadata().clusterUUIDCommitted() == true) { + final Optional latestManifest = remoteClusterStateService.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); + if (latestManifest.isPresent()) { + // The previous UUID should not change for the current UUID, so we fetch the latest manifest + // from the remote store and read the previous UUID from it. 
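The previousClusterUUID read here is what links manifests together: every manifest records the UUID of the cluster it was restored from, so the manifests form a backward chain that terminates at ClusterState.UNKNOWN_UUID ("_na_"). A minimal sketch of walking such a chain (ChainDemo, walkChain and uuidToPrevious are hypothetical names; the real traversal lives in createClusterChain in RemoteClusterStateService below):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

final class ChainDemo {

    // returns the chain starting from the most recent cluster UUID
    static List<String> walkChain(String head, Map<String, String> uuidToPrevious) {
        final List<String> chain = new ArrayList<>();
        String current = head;
        // stop at the "_na_" sentinel (ClusterState.UNKNOWN_UUID) or a broken link
        while (current != null && "_na_".equals(current) == false) {
            chain.add(current);
            current = uuidToPrevious.get(current);
        }
        return chain;
    }
}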
+ previousClusterUUID = latestManifest.get().getPreviousClusterUUID(); + } else { + // When the user starts the cluster with remote state disabled but later enables the remote state, + // there will not be any manifest for the current cluster UUID. + logger.error( + "Latest manifest is not present in remote store for cluster UUID: {}", + clusterState.metadata().clusterUUID() + ); + previousClusterUUID = ClusterState.UNKNOWN_UUID; + } + } + manifest = remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID); } else { assert verifyManifestAndClusterState(lastAcceptedManifest, lastAcceptedState) == true : "Previous manifest and previous ClusterState are not in sync"; @@ -723,11 +746,19 @@ public void markLastAcceptedStateAsCommitted() { try { assert lastAcceptedState != null : "Last accepted state is not present"; assert lastAcceptedManifest != null : "Last accepted manifest is not present"; + ClusterState clusterState = lastAcceptedState; + if (lastAcceptedState.metadata().clusterUUID().equals(Metadata.UNKNOWN_CLUSTER_UUID) == false + && lastAcceptedState.metadata().clusterUUIDCommitted() == false) { + Metadata.Builder metadataBuilder = Metadata.builder(lastAcceptedState.metadata()); + metadataBuilder.clusterUUIDCommitted(true); + clusterState = ClusterState.builder(lastAcceptedState).metadata(metadataBuilder).build(); + } final ClusterMetadataManifest committedManifest = remoteClusterStateService.markLastStateAsCommitted( - lastAcceptedState, + clusterState, lastAcceptedManifest ); lastAcceptedManifest = committedManifest; + lastAcceptedState = clusterState; } catch (Exception e) { handleExceptionOnWrite(e); } diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java index 040c0663efbd9..40b16f3d6323b 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java @@ -42,6 +42,7 @@ public class ClusterMetadataManifest implements Writeable, ToXContentFragment { private static final ParseField COMMITTED_FIELD = new ParseField("committed"); private static final ParseField INDICES_FIELD = new ParseField("indices"); private static final ParseField PREVIOUS_CLUSTER_UUID = new ParseField("previous_cluster_uuid"); + private static final ParseField CLUSTER_UUID_COMMITTED = new ParseField("cluster_uuid_committed"); private static long term(Object[] fields) { return (long) fields[0]; @@ -79,6 +80,10 @@ private static String previousClusterUUID(Object[] fields) { return (String) fields[8]; } + private static boolean clusterUUIDCommitted(Object[] fields) { + return (boolean) fields[9]; + } + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "cluster_metadata_manifest", fields -> new ClusterMetadataManifest( @@ -90,7 +95,8 @@ private static String previousClusterUUID(Object[] fields) { nodeId(fields), committed(fields), indices(fields), - previousClusterUUID(fields) + previousClusterUUID(fields), + clusterUUIDCommitted(fields) ) ); @@ -108,6 +114,7 @@ private static String previousClusterUUID(Object[] fields) { INDICES_FIELD ); PARSER.declareString(ConstructingObjectParser.constructorArg(), PREVIOUS_CLUSTER_UUID); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), CLUSTER_UUID_COMMITTED); } private final List indices; @@ -119,6 +126,7 @@ private static String previousClusterUUID(Object[] fields) { 
private final String nodeId; private final boolean committed; private final String previousClusterUUID; + private final boolean clusterUUIDCommitted; public List getIndices() { return indices; @@ -156,6 +164,10 @@ public String getPreviousClusterUUID() { return previousClusterUUID; } + public boolean isClusterUUIDCommitted() { + return clusterUUIDCommitted; + } + public ClusterMetadataManifest( long clusterTerm, long version, @@ -165,7 +177,8 @@ public ClusterMetadataManifest( String nodeId, boolean committed, List indices, - String previousClusterUUID + String previousClusterUUID, + boolean clusterUUIDCommitted ) { this.clusterTerm = clusterTerm; this.stateVersion = version; @@ -176,6 +189,7 @@ public ClusterMetadataManifest( this.committed = committed; this.indices = Collections.unmodifiableList(indices); this.previousClusterUUID = previousClusterUUID; + this.clusterUUIDCommitted = clusterUUIDCommitted; } public ClusterMetadataManifest(StreamInput in) throws IOException { @@ -188,6 +202,7 @@ public ClusterMetadataManifest(StreamInput in) throws IOException { this.committed = in.readBoolean(); this.indices = Collections.unmodifiableList(in.readList(UploadedIndexMetadata::new)); this.previousClusterUUID = in.readString(); + this.clusterUUIDCommitted = in.readBoolean(); } public static Builder builder() { @@ -215,6 +230,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endArray(); builder.field(PREVIOUS_CLUSTER_UUID.getPreferredName(), getPreviousClusterUUID()); + builder.field(CLUSTER_UUID_COMMITTED.getPreferredName(), isClusterUUIDCommitted()); return builder; } @@ -229,6 +245,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(committed); out.writeCollection(indices); out.writeString(previousClusterUUID); + out.writeBoolean(clusterUUIDCommitted); } @Override @@ -248,7 +265,8 @@ public boolean equals(Object o) { && Objects.equals(opensearchVersion, that.opensearchVersion) && Objects.equals(nodeId, that.nodeId) && Objects.equals(committed, that.committed) - && Objects.equals(previousClusterUUID, that.previousClusterUUID); + && Objects.equals(previousClusterUUID, that.previousClusterUUID) + && Objects.equals(clusterUUIDCommitted, that.clusterUUIDCommitted); } @Override @@ -262,7 +280,8 @@ public int hashCode() { opensearchVersion, nodeId, committed, - previousClusterUUID + previousClusterUUID, + clusterUUIDCommitted ); } @@ -291,6 +310,7 @@ public static class Builder { private String nodeId; private String previousClusterUUID; private boolean committed; + private boolean clusterUUIDCommitted; public Builder indices(List indices) { this.indices = indices; @@ -341,6 +361,11 @@ public Builder previousClusterUUID(String previousClusterUUID) { return this; } + public Builder clusterUUIDCommitted(boolean clusterUUIDCommitted) { + this.clusterUUIDCommitted = clusterUUIDCommitted; + return this; + } + public Builder() { indices = new ArrayList<>(); } @@ -355,6 +380,7 @@ public Builder(ClusterMetadataManifest manifest) { this.committed = manifest.committed; this.indices = new ArrayList<>(manifest.indices); this.previousClusterUUID = manifest.previousClusterUUID; + this.clusterUUIDCommitted = manifest.clusterUUIDCommitted; } public ClusterMetadataManifest build() { @@ -367,7 +393,8 @@ public ClusterMetadataManifest build() { nodeId, committed, indices, - previousClusterUUID + previousClusterUUID, + clusterUUIDCommitted ); } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java 
b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index cf750bb11f3f8..dddc5376803a5 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -152,20 +152,13 @@ private BlobStoreTransferService getBlobStoreTransferService() { * @return A manifest object which contains the details of uploaded entity metadata. */ @Nullable - public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState) throws IOException { + public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState, String previousClusterUUID) throws IOException { final long startTimeNanos = relativeTimeNanosSupplier.getAsLong(); if (clusterState.nodes().isLocalNodeElectedClusterManager() == false) { logger.error("Local node is not elected cluster manager. Exiting"); return null; } - // should fetch the previous cluster UUID before writing full cluster state. - // Whenever a new election happens, a new leader will be elected and it might have stale previous UUID - final String previousClusterUUID = fetchPreviousClusterUUID( - clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID() - ); - // any validations before/after upload ? final List allUploadedIndexMetadata = writeIndexMetadataParallel( clusterState, @@ -436,7 +429,8 @@ private ClusterMetadataManifest uploadManifest( nodeId, committed, uploadedIndexMetadata, - previousClusterUUID + previousClusterUUID, + clusterState.metadata().clusterUUIDCommitted() ); writeMetadataManifest(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID(), manifest, manifestFileName); return manifest; @@ -582,7 +576,7 @@ public String getLastKnownUUIDFromRemote(String clusterName) { try { Set clusterUUIDs = getAllClusterUUIDs(clusterName); Map latestManifests = getLatestManifestForAllClusterUUIDs(clusterName, clusterUUIDs); - List validChain = createClusterChain(latestManifests); + List validChain = createClusterChain(latestManifests, clusterName); if (validChain.isEmpty()) { return ClusterState.UNKNOWN_UUID; } @@ -623,7 +617,7 @@ private Map getLatestManifestForAllClusterUUIDs * @param manifestsByClusterUUID Map of latest ClusterMetadataManifest for every cluster UUID * @return List of cluster UUIDs. The first element is the most recent cluster UUID in the chain */ - private List createClusterChain(final Map manifestsByClusterUUID) { + private List createClusterChain(final Map manifestsByClusterUUID, final String clusterName) { final Map clusterUUIDGraph = manifestsByClusterUUID.values() .stream() .collect(Collectors.toMap(ClusterMetadataManifest::getClusterUUID, ClusterMetadataManifest::getPreviousClusterUUID)); @@ -637,18 +631,29 @@ private List createClusterChain(final Map 1) { - throw new IllegalStateException( - String.format( - Locale.ROOT, - "The system has ended into multiple valid cluster states in the remote store. " - + "Please check their latest manifest to decide which one you want to keep. Valid Cluster UUIDs: - %s", - validClusterUUIDs - ) + // If there is more than 1 valid cluster UUID, it means there was some race condition where + // 2 or more cluster manager nodes tried to become the active cluster manager and published 
+ final Map manifestsByClusterUUIDTrimmed = trimClusterUUIDs( + manifestsByClusterUUID, + validClusterUUIDs, + clusterName ); + if (manifestsByClusterUUID.size() == manifestsByClusterUUIDTrimmed.size()) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "The system has ended into multiple valid cluster states in the remote store. " + + "Please check their latest manifest to decide which one you want to keep. Valid Cluster UUIDs: - %s", + validClusterUUIDs + ) + ); + } + return createClusterChain(manifestsByClusterUUIDTrimmed, clusterName); } final List validChain = new ArrayList<>(); String currentUUID = validClusterUUIDs.get(0); - while (!ClusterState.UNKNOWN_UUID.equals(currentUUID)) { + while (currentUUID != null && !ClusterState.UNKNOWN_UUID.equals(currentUUID)) { validChain.add(currentUUID); // Getting the previous cluster UUID of a cluster UUID from the clusterUUID Graph currentUUID = clusterUUIDGraph.get(currentUUID); @@ -656,8 +661,61 @@ private List createClusterChain(final Map trimClusterUUIDs( + final Map latestManifestsByClusterUUID, + final List validClusterUUIDs, + final String clusterName + ) { + final Map trimmedUUIDs = new HashMap<>(latestManifestsByClusterUUID); + for (String clusterUUID : validClusterUUIDs) { + ClusterMetadataManifest currentManifest = trimmedUUIDs.get(clusterUUID); + // Here we compare the manifest of current UUID to that of previous UUID + // In case currentUUID's latest manifest is same as previous UUIDs latest manifest, + // that means it was restored from previousUUID and no IndexMetadata update was performed on it. + if (ClusterState.UNKNOWN_UUID.equals(currentManifest.getPreviousClusterUUID())) { + if (currentManifest.getIndices().isEmpty()) { + trimmedUUIDs.remove(clusterUUID); + } + } else { + ClusterMetadataManifest previousManifest = trimmedUUIDs.get(currentManifest.getPreviousClusterUUID()); + if (isMetadataEqual(currentManifest, previousManifest, clusterName)) { + trimmedUUIDs.remove(clusterUUID); + } + } + } + return trimmedUUIDs; + } + + private boolean isMetadataEqual(ClusterMetadataManifest first, ClusterMetadataManifest second, String clusterName) { + // todo clusterName can be set as final in the constructor + if (first.getIndices().size() != second.getIndices().size()) { + return false; + } + final Map secondIndices = second.getIndices() + .stream() + .collect(Collectors.toMap(md -> md.getIndexName(), Function.identity())); + for (UploadedIndexMetadata uploadedIndexMetadata : first.getIndices()) { + final IndexMetadata firstIndexMetadata = getIndexMetadata(clusterName, first.getClusterUUID(), uploadedIndexMetadata); + final UploadedIndexMetadata secondUploadedIndexMetadata = secondIndices.get(uploadedIndexMetadata.getIndexName()); + if (secondUploadedIndexMetadata == null) { + return false; + } + final IndexMetadata secondIndexMetadata = getIndexMetadata(clusterName, second.getClusterUUID(), secondUploadedIndexMetadata); + if (firstIndexMetadata.equals(secondIndexMetadata) == false) { + return false; + } + } + return true; + } + private boolean isInvalidClusterUUID(ClusterMetadataManifest manifest) { - return !manifest.isCommitted() && manifest.getIndices().isEmpty(); + return !manifest.isClusterUUIDCommitted(); } /** @@ -729,6 +787,7 @@ public IndexMetadataTransferException(String errorDesc, Throwable cause) { /** * Purges all remote cluster state against provided cluster UUIDs + * * @param clusterName name of the cluster * @param clusterUUIDs clusteUUIDs for which the remote state needs to be purged */ @@ -760,6 
+819,7 @@ public void onFailure(Exception e) { /** * Deletes older than last {@code versionsToRetain} manifests. Also cleans up unreferenced IndexMetadata associated with older manifests + * * @param clusterName name of the cluster * @param clusterUUID uuid of cluster state to refer to in remote * @param manifestsToRetain no of latest manifest files to keep in remote diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java index d1c2dda615992..f37823d2c0c7d 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java @@ -60,6 +60,7 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import static java.util.Collections.emptyMap; @@ -70,6 +71,9 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; public class CoordinationStateTests extends OpenSearchTestCase { @@ -925,6 +929,7 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); final VotingConfiguration initialConfig = VotingConfiguration.of(node1); final ClusterState clusterState = clusterState(0L, 0L, node1, initialConfig, initialConfig, 42L); + final String previousClusterUUID = "prev-cluster-uuid"; final ClusterMetadataManifest manifest = new ClusterMetadataManifest( 0L, 0L, @@ -934,13 +939,17 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep randomAlphaOfLength(10), false, Collections.emptyList(), - randomAlphaOfLength(10) + randomAlphaOfLength(10), + true ); - Mockito.when(remoteClusterStateService.writeFullMetadata(clusterState)).thenReturn(manifest); + Mockito.when(remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID)).thenReturn(manifest); final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, ps1); - persistedStateRegistry.addPersistedState(PersistedStateType.REMOTE, new RemotePersistedState(remoteClusterStateService)); + persistedStateRegistry.addPersistedState( + PersistedStateType.REMOTE, + new RemotePersistedState(remoteClusterStateService, previousClusterUUID) + ); String randomRepoName = "randomRepoName"; String stateRepoTypeAttributeKey = String.format( @@ -963,11 +972,28 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep final CoordinationState coordinationState = createCoordinationState(persistedStateRegistry, node1, settings); coordinationState.handlePrePublish(clusterState); - Mockito.verify(remoteClusterStateService, Mockito.times(1)).writeFullMetadata(clusterState); + Mockito.verify(remoteClusterStateService, Mockito.times(1)).writeFullMetadata(clusterState, previousClusterUUID); assertThat(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedState(), equalTo(clusterState)); coordinationState.handlePreCommit(); - Mockito.verify(remoteClusterStateService, Mockito.times(1)).markLastStateAsCommitted(clusterState, 
manifest); + ClusterState committedClusterState = ClusterState.builder(clusterState) + .metadata(Metadata.builder(clusterState.metadata()).clusterUUIDCommitted(true).build()) + .build(); + // Mockito.verify(remoteClusterStateService, Mockito.times(1)).markLastStateAsCommitted(committedClusterState, manifest); + ArgumentCaptor clusterStateCaptor = ArgumentCaptor.forClass(ClusterState.class); + verify(remoteClusterStateService, times(1)).markLastStateAsCommitted(clusterStateCaptor.capture(), any()); + assertThat(clusterStateCaptor.getValue().metadata().indices(), equalTo(committedClusterState.metadata().indices())); + assertThat(clusterStateCaptor.getValue().metadata().clusterUUID(), equalTo(committedClusterState.metadata().clusterUUID())); + assertThat(clusterStateCaptor.getValue().stateUUID(), equalTo(committedClusterState.stateUUID())); + assertThat( + clusterStateCaptor.getValue().coordinationMetadata().term(), + equalTo(committedClusterState.coordinationMetadata().term()) + ); + assertThat(clusterStateCaptor.getValue().version(), equalTo(committedClusterState.version())); + assertThat( + clusterStateCaptor.getValue().metadata().clusterUUIDCommitted(), + equalTo(committedClusterState.metadata().clusterUUIDCommitted()) + ); } public static CoordinationState createCoordinationState( diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java index 486717faaf864..c7ed1cb732154 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -716,10 +716,11 @@ Directory createDirectory(Path path) { public void testRemotePersistedState() throws IOException { final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); final ClusterMetadataManifest manifest = ClusterMetadataManifest.builder().clusterTerm(1L).stateVersion(5L).build(); - Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any())).thenReturn(manifest); + final String previousClusterUUID = "prev-cluster-uuid"; + Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any(), Mockito.any())).thenReturn(manifest); Mockito.when(remoteClusterStateService.writeIncrementalMetadata(Mockito.any(), Mockito.any(), Mockito.any())).thenReturn(manifest); - CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService); + CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService, previousClusterUUID); assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); assertThat(remotePersistedState.getCurrentTerm(), equalTo(0L)); @@ -731,7 +732,7 @@ public void testRemotePersistedState() throws IOException { ); remotePersistedState.setLastAcceptedState(clusterState); - Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterState); + Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterState, previousClusterUUID); assertThat(remotePersistedState.getLastAcceptedState(), equalTo(clusterState)); assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm)); @@ -742,7 +743,7 @@ public void testRemotePersistedState() throws IOException { ); remotePersistedState.setLastAcceptedState(secondClusterState); - Mockito.verify(remoteClusterStateService, times(1)).writeFullMetadata(secondClusterState); + 
Mockito.verify(remoteClusterStateService, times(1)).writeFullMetadata(secondClusterState, previousClusterUUID); assertThat(remotePersistedState.getLastAcceptedState(), equalTo(secondClusterState)); assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm)); @@ -752,14 +753,22 @@ public void testRemotePersistedState() throws IOException { assertThat(remotePersistedState.getLastAcceptedState(), equalTo(secondClusterState)); assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm)); + assertThat(remotePersistedState.getLastAcceptedState().metadata().clusterUUIDCommitted(), equalTo(false)); + final ClusterState thirdClusterState = ClusterState.builder(secondClusterState) + .metadata(Metadata.builder(secondClusterState.getMetadata()).clusterUUID(randomAlphaOfLength(10)).build()) + .build(); + remotePersistedState.setLastAcceptedState(thirdClusterState); + remotePersistedState.markLastAcceptedStateAsCommitted(); + assertThat(remotePersistedState.getLastAcceptedState().metadata().clusterUUIDCommitted(), equalTo(true)); } public void testRemotePersistedStateExceptionOnFullStateUpload() throws IOException { final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); - Mockito.doThrow(IOException.class).when(remoteClusterStateService).writeFullMetadata(Mockito.any()); + final String previousClusterUUID = "prev-cluster-uuid"; + Mockito.doThrow(IOException.class).when(remoteClusterStateService).writeFullMetadata(Mockito.any(), Mockito.any()); - CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService); + CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService, previousClusterUUID); final long clusterTerm = randomNonNegativeLong(); final ClusterState clusterState = createClusterState( diff --git a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java index 9f8dde5ba9d45..66426c2a880a3 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java @@ -37,7 +37,8 @@ public void testClusterMetadataManifestXContent() throws IOException { "test-node-id", false, Collections.singletonList(uploadedIndexMetadata), - "prev-cluster-uuid" + "prev-cluster-uuid", + true ); final XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); @@ -60,7 +61,8 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { "B10RX1f5RJenMQvYccCgSQ", true, randomUploadedIndexMetadataList(), - "yfObdx8KSMKKrXf8UyHhM" + "yfObdx8KSMKKrXf8UyHhM", + true ); { // Mutate Cluster Term EqualsHashCodeTestUtils.checkEqualsAndHashCode( @@ -183,6 +185,21 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { ); } + { // Mutate cluster uuid committed + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.clusterUUIDCommitted(false); + return builder.build(); + } + ); + } } private List randomUploadedIndexMetadataList() { diff --git 
a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 9f5067420aab1..65166386733c6 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -135,7 +135,7 @@ public void teardown() throws Exception { public void testFailWriteFullMetadataNonClusterManagerNode() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().build(); - final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)); Assert.assertThat(manifest, nullValue()); } @@ -169,7 +169,7 @@ public void testWriteFullMetadataSuccess() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); mockBlobStoreObjects(); remoteClusterStateService.start(); - final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid"); final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); List indices = List.of(uploadedIndexMetadata); @@ -190,6 +190,7 @@ public void testWriteFullMetadataSuccess() throws IOException { assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + assertThat(manifest.getPreviousClusterUUID(), is(expectedManifest.getPreviousClusterUUID())); } public void testWriteFullMetadataInParallelSuccess() throws IOException { @@ -205,7 +206,7 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { }).when(container).asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); remoteClusterStateService.start(); - final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid"); final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); List indices = List.of(uploadedIndexMetadata); @@ -216,6 +217,7 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { .stateVersion(1L) .stateUUID("state-uuid") .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") .build(); assertThat(manifest.getIndices().size(), is(1)); @@ -226,6 +228,7 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + assertThat(manifest.getPreviousClusterUUID(), is(expectedManifest.getPreviousClusterUUID())); assertEquals(actionListenerArgumentCaptor.getAllValues().size(), 1); assertEquals(writeContextArgumentCaptor.getAllValues().size(), 1); @@ -266,7 +269,7 @@ public void 
testWriteFullMetadataInParallelFailure() throws IOException { remoteClusterStateService.start(); assertThrows( RemoteClusterStateService.IndexMetadataTransferException.class, - () -> remoteClusterStateService.writeFullMetadata(clusterState) + () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) ); } @@ -571,12 +574,43 @@ public void testGetValidPreviousClusterUUID() throws IOException { public void testGetValidPreviousClusterUUIDForInvalidChain() throws IOException { Map clusterUUIDsPointers = Map.of( + "cluster-uuid2", "cluster-uuid1", - ClusterState.UNKNOWN_UUID, + "cluster-uuid3", + "cluster-uuid2", + "cluster-uuid5", + "cluster-uuid4" + ); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers); + + remoteClusterStateService.start(); + assertThrows(IllegalStateException.class, () -> remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")); + } + + public void testGetValidPreviousClusterUUIDWithMultipleChains() throws IOException { + Map clusterUUIDsPointers = Map.of( "cluster-uuid2", + "cluster-uuid1", + "cluster-uuid1", ClusterState.UNKNOWN_UUID, "cluster-uuid3", - "cluster-uuid2" + "cluster-uuid1" + ); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers); + + remoteClusterStateService.start(); + String previousClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster"); + assertThat(previousClusterUUID, equalTo("cluster-uuid3")); + } + + public void testGetValidPreviousClusterUUIDWithInvalidMultipleChains() throws IOException { + Map clusterUUIDsPointers = Map.of( + "cluster-uuid1", + ClusterState.UNKNOWN_UUID, + "cluster-uuid2", + "cluster-uuid1", + "cluster-uuid3", + ClusterState.UNKNOWN_UUID ); mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers); @@ -598,42 +632,92 @@ private void mockObjectsForGettingPreviousClusterUUID(Map cluste when(blobContainer3.path()).thenReturn(blobPath); mockBlobContainerForClusterUUIDs(uuidBlobContainer, clusterUUIDsPointers.keySet()); + List uploadedIndexMetadataList1 = List.of( + new UploadedIndexMetadata("index1", "index-uuid1", "key1"), + new UploadedIndexMetadata("index2", "index-uuid2", "key2") + ); final ClusterMetadataManifest clusterManifest1 = generateClusterMetadataManifest( "cluster-uuid1", clusterUUIDsPointers.get("cluster-uuid1"), - randomAlphaOfLength(10) + randomAlphaOfLength(10), + uploadedIndexMetadataList1 ); - mockBlobContainer(blobContainer1, clusterManifest1, Map.of()); + Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build(); + IndexMetadata indexMetadata1 = IndexMetadata.builder("index1") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + IndexMetadata indexMetadata2 = IndexMetadata.builder("index2") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + Map indexMetadataMap1 = Map.of("index-uuid1", indexMetadata1, "index-uuid2", indexMetadata2); + mockBlobContainer(blobContainer1, clusterManifest1, indexMetadataMap1); + List uploadedIndexMetadataList2 = List.of( + new UploadedIndexMetadata("index1", "index-uuid1", "key1"), + new UploadedIndexMetadata("index2", "index-uuid2", "key2") + ); final ClusterMetadataManifest clusterManifest2 = generateClusterMetadataManifest( "cluster-uuid2", clusterUUIDsPointers.get("cluster-uuid2"), - randomAlphaOfLength(10) + randomAlphaOfLength(10), + uploadedIndexMetadataList2 ); - mockBlobContainer(blobContainer2, clusterManifest2, Map.of()); + IndexMetadata indexMetadata3 = 
IndexMetadata.builder("index1") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + IndexMetadata indexMetadata4 = IndexMetadata.builder("index2") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + Map indexMetadataMap2 = Map.of("index-uuid1", indexMetadata3, "index-uuid2", indexMetadata4); + mockBlobContainer(blobContainer2, clusterManifest2, indexMetadataMap2); + List uploadedIndexMetadataList3 = List.of(new UploadedIndexMetadata("index1", "index-uuid1", "key1")); final ClusterMetadataManifest clusterManifest3 = generateClusterMetadataManifest( "cluster-uuid3", clusterUUIDsPointers.get("cluster-uuid3"), - randomAlphaOfLength(10) + randomAlphaOfLength(10), + uploadedIndexMetadataList3 ); - mockBlobContainer(blobContainer3, clusterManifest3, Map.of()); + IndexMetadata indexMetadata5 = IndexMetadata.builder("index1") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + Map indexMetadataMap3 = Map.of("index-uuid1", indexMetadata5); + mockBlobContainer(blobContainer3, clusterManifest3, indexMetadataMap3); when(blobStore.blobContainer(ArgumentMatchers.any())).thenReturn( uuidBlobContainer, blobContainer1, blobContainer1, + blobContainer3, + blobContainer3, blobContainer2, blobContainer2, - blobContainer3, - blobContainer3 + blobContainer1, + blobContainer2, + blobContainer1, + blobContainer2 ); when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor()); } - private ClusterMetadataManifest generateClusterMetadataManifest(String clusterUUID, String previousClusterUUID, String stateUUID) { + private ClusterMetadataManifest generateClusterMetadataManifest( + String clusterUUID, + String previousClusterUUID, + String stateUUID, + List uploadedIndexMetadata + ) { return ClusterMetadataManifest.builder() - .indices(List.of()) + .indices(uploadedIndexMetadata) .clusterTerm(1L) .stateVersion(1L) .stateUUID(stateUUID) @@ -642,6 +726,7 @@ private ClusterMetadataManifest generateClusterMetadataManifest(String clusterUU .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) .previousClusterUUID(previousClusterUUID) .committed(true) + .clusterUUIDCommitted(true) .build(); } From 256ea418015c513a96390407ab0ddfa5704269b2 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 19 Sep 2023 14:47:11 -0400 Subject: [PATCH 15/23] Update Apache Lucene to 9.8.0-snapshot-95cdd2e (#10031) Signed-off-by: Andriy Redko --- buildSrc/version.properties | 2 +- ...lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...expressions-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...expressions-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...nalysis-icu-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...nalysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...is-kuromoji-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...is-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...alysis-nori-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...alysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...is-phonetic-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...is-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...sis-smartcn-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...sis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...sis-stempel-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...sis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...-morfologik-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + .../mapper/CorrelationVectorFieldMapper.java | 5 ++--- .../query/CorrelationQueryBuilderTests.java | 2 
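The fixtures above feed the previous-UUID chain validation: each manifest records the UUID of the cluster it replaced, a valid history walks back to ClusterState.UNKNOWN_UUID, and getLastKnownUUIDFromRemote either resolves a single head or throws IllegalStateException. Below is a partial sketch of one invariant these tests pin down (every known UUID must chain back to UNKNOWN_UUID without gaps or cycles); the full resolution, which also rejects genuinely forked histories while trimming duplicate chains by comparing the uploaded index metadata mocked above, lives in the service and is not reproduced here:

    // Illustrative invariant check only; this is not the service implementation.
    // `pointers` maps clusterUUID -> previousClusterUUID, as in the tests above.
    // Needs java.util.{Map, Set, HashSet} and org.opensearch.cluster.ClusterState.
    static void assertChainsReachUnknownUuid(Map<String, String> pointers) {
        for (String uuid : pointers.keySet()) {
            String cursor = uuid;
            Set<String> seen = new HashSet<>();
            while (ClusterState.UNKNOWN_UUID.equals(cursor) == false) {
                if (seen.add(cursor) == false) {
                    throw new IllegalStateException("cycle in cluster UUID chain at " + cursor);
                }
                String previous = pointers.get(cursor);
                if (previous == null) {
                    throw new IllegalStateException("broken cluster UUID chain starting at " + uuid);
                }
                cursor = previous;
            }
        }
    }

Run against the invalid-chain fixture above ("cluster-uuid5" pointing at an unknown "cluster-uuid4"), the walk finds no predecessor entry and throws, matching the IllegalStateException the test expects.
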
+- ...ysis-common-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...ysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...ward-codecs-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...ward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...ne-grouping-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...ne-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...highlighter-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...lucene-join-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...cene-memory-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...cene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...lucene-misc-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...ene-queries-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...ene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...queryparser-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...ene-sandbox-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...ene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...tial-extras-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...tial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...e-spatial3d-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...e-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + ...ene-suggest-9.8.0-snapshot-4373c3b.jar.sha1 | 1 - ...ene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 | 1 + .../node/tasks/ConcurrentSearchTasksIT.java | 5 +++-- .../index/engine/TranslogLeafReader.java | 6 +++--- .../comparators/UnsignedLongComparator.java | 18 ++++++++++-------- 52 files changed, 43 insertions(+), 41 deletions(-) delete mode 100644 libs/core/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 
plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.8.0-snapshot-4373c3b.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index be09584b1dba1..4a31582bb7546 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.8.0-snapshot-4373c3b +lucene = 9.8.0-snapshot-95cdd2e bundled_jdk_vendor = adoptium bundled_jdk = 20.0.2+9 diff --git a/libs/core/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 b/libs/core/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index dc363f2776429..0000000000000 --- a/libs/core/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30c3afcf058532d3d2b8820375043000e7f34a9b \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 b/libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..70baf1270cd5d --- /dev/null +++ b/libs/core/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +d2f7fbc5b2c49ca777a169d579f41082a9a57cc7 \ No newline at end of file diff --git 
a/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-4373c3b.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 6eaa40708e4ae..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f8a34fc3d450343ab05ccb5af318a836a6a5fb3 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..c1daa91dd5433 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +57e2b0cca55da8ad856dfd60be42e6daabbc98c3 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 8a3332c950b6d..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fde64e3b23bc9a0849b9897febfe9f13c5113143 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..035b47c5f388c --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +0deb3b85eadf831be17b48acab0785fd9d34fc44 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 33c2afacf2395..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b01a791705fa01fce48dd02ea79fa8045de8dd5e \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..6ff5a433f0a4e --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +9a204267d68ce4ba36bfddc366cd6865cf5e1378 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 1e7986dafa11e..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -43d19320b1b9cd18638b1602fa87d5f21ee043bc \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..a65ab33a31e2a --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +71e8e811f873ba2b47c7ecf9d890cbeac5b6be41 \ No newline at end of file diff --git 
a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 14880d9c2d243..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9244dc232f175010b480d4d88e13945c17a0b28b \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..04ab7b7e7adb8 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +6e1274273895365bd83391cc4b79f5264479f5de \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index edc4de3fffe28..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3101a4f79820c1ca3dfb8f49b74c5fb5b32940e1 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..cef3f97d03c51 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +e634c8685edad2bdb5c13748b18c0c1a46bb63a3 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 54c310277b09b..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f12b2a22cd5ebcd84f40a40e78fdd4e268b3b26d \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..3e2dd19a9dd85 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +0afdf2afacbae39414ed06325fbb4bed17c07a7d \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 358db9ea3f0f5..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7dbf5cc3dff93cc1ffe45d79b129859590d001dd \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..8c0544acd1ca0 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ 
+166e2ea297182f7bf7070af02aacea9e6a3a19c8 \ No newline at end of file diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java index a1918f3c954d0..18c9dd222e2cf 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java @@ -8,6 +8,7 @@ package org.opensearch.plugin.correlation.core.index.mapper; +import org.apache.lucene.codecs.KnnVectorsFormat; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.KnnFloatVectorField; import org.apache.lucene.document.StoredField; @@ -23,8 +24,6 @@ import java.util.Locale; import java.util.Optional; -import static org.apache.lucene.index.FloatVectorValues.MAX_DIMENSIONS; - /** * Field mapper for the correlation vector type * @@ -32,7 +31,7 @@ */ public class CorrelationVectorFieldMapper extends VectorFieldMapper { - private static final int LUCENE_MAX_DIMENSION = MAX_DIMENSIONS; + private static final int LUCENE_MAX_DIMENSION = KnnVectorsFormat.DEFAULT_MAX_DIMENSIONS; private final FieldType vectorFieldType; diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java index b3fcbedd74558..134c6519d7220 100644 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java +++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java @@ -203,7 +203,7 @@ public void testDoToQueryInvalidFieldType() { /** * test serialization of Correlation Query Builder - * @throws Exception + * @throws Exception Exception */ public void testSerialization() throws Exception { assertSerialization(Optional.empty()); diff --git a/server/licenses/lucene-analysis-common-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-analysis-common-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index e7c7dc2bbc046..0000000000000 --- a/server/licenses/lucene-analysis-common-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1446b7641743a1082b566179d1bf2960f5a0724b \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..4ac89f2e792d7 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +8c82be3d997d781bb72d6d0eadade064dd2cd6db \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-backward-codecs-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index d0f64519cd6ff..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -127032ea137d2501b24f0e35e5f9a2e1c7864633 \ No newline at end of file diff --git 
a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..624b5174a444f --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +4c261d17c681c0d91171c67e192abfef59adea2e \ No newline at end of file diff --git a/server/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index dc363f2776429..0000000000000 --- a/server/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30c3afcf058532d3d2b8820375043000e7f34a9b \ No newline at end of file diff --git a/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..70baf1270cd5d --- /dev/null +++ b/server/licenses/lucene-core-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +d2f7fbc5b2c49ca777a169d579f41082a9a57cc7 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-grouping-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 294beba43f62a..0000000000000 --- a/server/licenses/lucene-grouping-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e6f742efe0ef3b383468fe38f88ab2dd69ed3d2c \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..20ddb9ae3ef27 --- /dev/null +++ b/server/licenses/lucene-grouping-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +8d1cf3d6db43fad6630376ba59451f848f4d387c \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-highlighter-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index c2a2ef5b13946..0000000000000 --- a/server/licenses/lucene-highlighter-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3162856444777130dee2c4cabe1bf6d18710ff63 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..c3ad03ca53b13 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +83ab97638bb5269f950d75bba5675d3cfb63f2fa \ No newline at end of file diff --git a/server/licenses/lucene-join-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-join-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 7c6adaaba9cf1..0000000000000 --- a/server/licenses/lucene-join-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5fe8383516eca7300f978ce38042e327b0a57877 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..c2a4c5334b314 --- /dev/null +++ b/server/licenses/lucene-join-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +97c26362151908dc892263edda3872abbacb71a8 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-memory-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 586702c968a77..0000000000000 --- 
a/server/licenses/lucene-memory-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b3e77970485be6d2dd59b999bbaa65a2cb993744 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..32534d07e47dc --- /dev/null +++ b/server/licenses/lucene-memory-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +8337eddc0dddd0d7dd50c5aa0d17e5e31592f9fa \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-misc-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 493598eefff5e..0000000000000 --- a/server/licenses/lucene-misc-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -86d667ea2f7fb2142d2acacf801dcea47d014a5e \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..7db245cc521c7 --- /dev/null +++ b/server/licenses/lucene-misc-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +a2e3fae930295f0e2b401effe04eafc25692a414 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-queries-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 1bf937f10d795..0000000000000 --- a/server/licenses/lucene-queries-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -930d004de698f374da8ac5530fd80e241edeba45 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..d01a6d733196e --- /dev/null +++ b/server/licenses/lucene-queries-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +e88d8a464e6cfa345b946c9c8822ba7ee2a9159f \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-queryparser-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 74458bc93f90b..0000000000000 --- a/server/licenses/lucene-queryparser-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f62882823d5aa9ed4cf0081a8c18f35e21992080 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..c7b9640bad170 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +9905790675c01e8dc24f9a5e6b9b28b879c65a52 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-sandbox-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 3231d0e067940..0000000000000 --- a/server/licenses/lucene-sandbox-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f1ec1527e283b423b7ff5e12cd8d889e7247199d \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..c4cd9e47624f8 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +d6c8be427ec8ffc7e8233ffbf0d190d95a56cf14 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-4373c3b.jar.sha1 
b/server/licenses/lucene-spatial-extras-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index dd47faf91f206..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -de787c052879893e47d21fa161c93413665d55d7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..dfee145d3ea26 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +11716d61288feaa692593bf699affa8de2b564c4 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-spatial3d-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 2b378438bfb14..0000000000000 --- a/server/licenses/lucene-spatial3d-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e541ed960a571f5d9a0ecff5c26fd5ca857581e \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..c7410086ba86c --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +3a888e06c0535403b9e58a8dcddeb5e6513a4930 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-suggest-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 1e3ed6561e3ef..0000000000000 --- a/server/licenses/lucene-suggest-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4b222ef09a5f20896d031a8322f2e69304c16384 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 b/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 new file mode 100644 index 0000000000000..6d8d4205f4d02 --- /dev/null +++ b/server/licenses/lucene-suggest-9.8.0-snapshot-95cdd2e.jar.sha1 @@ -0,0 +1 @@ +52dfc8bf135ed29f5baf0a967c1bb63dedb9a069 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java index ceacb028698de..c733329a1b5f7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java @@ -108,8 +108,9 @@ public void testConcurrentSearchTaskTracking() { assertEquals(mainTaskInfo.getTaskId(), taskInfo.getParentTaskId()); Map> threadStats = getThreadStats(SearchAction.NAME + "[*]", taskInfo.getTaskId()); - // Concurrent search forks each slice of 5 segments to different thread - assertEquals((int) Math.ceil(getSegmentCount(INDEX_NAME) / 5.0), threadStats.size()); + // Concurrent search forks each slice of 5 segments to different thread (see please + // https://github.com/apache/lucene/issues/12498) + assertEquals((int) Math.ceil(getSegmentCount(INDEX_NAME) / 5.0) + 1, threadStats.size()); // assert that all task descriptions have non-zero length MatcherAssert.assertThat(taskInfo.getDescription().length(), greaterThan(0)); diff --git a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java 
b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java index 417cdd5a8f030..c1f69d1ef3638 100644 --- a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java +++ b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java @@ -52,7 +52,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; -import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.KnnCollector; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.opensearch.common.util.set.Sets; @@ -271,12 +271,12 @@ public ByteVectorValues getByteVectorValues(String field) throws IOException { } @Override - public TopDocs searchNearestVectors(String field, float[] target, int k, Bits acceptDocs, int visitedLimit) throws IOException { + public void searchNearestVectors(String field, byte[] target, KnnCollector k, Bits acceptDocs) throws IOException { throw new UnsupportedOperationException(); } @Override - public TopDocs searchNearestVectors(String field, byte[] target, int k, Bits acceptDocs, int visitedLimit) throws IOException { + public void searchNearestVectors(String field, float[] target, KnnCollector k, Bits acceptDocs) throws IOException { throw new UnsupportedOperationException(); } } diff --git a/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java b/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java index 78b4a5f04c955..d46b34fe97356 100644 --- a/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java +++ b/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java @@ -86,14 +86,6 @@ public void copy(int slot, int doc) throws IOException { super.copy(slot, doc); } - @Override - protected boolean isMissingValueCompetitive() { - int result = missingValue.compareTo(bottom); - // in reverse (desc) sort missingValue is competitive when it's greater or equal to bottom, - // in asc sort missingValue is competitive when it's smaller or equal to bottom - return reverse ? (result >= 0) : (result <= 0); - } - @Override protected void encodeBottom(byte[] packedValue) { BigIntegerPoint.encodeDimension(bottom, packedValue, 0); @@ -103,5 +95,15 @@ protected void encodeBottom(byte[] packedValue) { protected void encodeTop(byte[] packedValue) { BigIntegerPoint.encodeDimension(topValue, packedValue, 0); } + + @Override + protected int compareMissingValueWithBottomValue() { + return missingValue.compareTo(bottom); + } + + @Override + protected int compareMissingValueWithTopValue() { + return missingValue.compareTo(topValue); + } } } From ed1c7ea58f54e480d21e472a5eb53961d1a489c1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Sep 2023 16:09:49 -0700 Subject: [PATCH 16/23] Bump tibdex/github-app-token from 1.5.0 to 2.1.0 (#10125) * Bump tibdex/github-app-token from 1.5.0 to 2.1.0 Bumps [tibdex/github-app-token](https://github.com/tibdex/github-app-token) from 1.5.0 to 2.1.0. - [Release notes](https://github.com/tibdex/github-app-token/releases) - [Commits](https://github.com/tibdex/github-app-token/compare/v1.5.0...v2.1.0) --- updated-dependencies: - dependency-name: tibdex/github-app-token dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- .github/workflows/auto-release.yml | 2 +- .github/workflows/backport.yml | 2 +- .github/workflows/create-documentation-issue.yml | 2 +- .github/workflows/dependabot_pr.yml | 2 +- .github/workflows/version.yml | 2 +- CHANGELOG.md | 1 + 6 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml index ae41ae00d018d..252cbda1392f8 100644 --- a/.github/workflows/auto-release.yml +++ b/.github/workflows/auto-release.yml @@ -14,7 +14,7 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 607c6de58b7f6..2a95177174e9b 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -26,7 +26,7 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} diff --git a/.github/workflows/create-documentation-issue.yml b/.github/workflows/create-documentation-issue.yml index 373eeb3569a51..df63847f8afca 100644 --- a/.github/workflows/create-documentation-issue.yml +++ b/.github/workflows/create-documentation-issue.yml @@ -14,7 +14,7 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml index f03a3d125067d..2a5e539b214d3 100644 --- a/.github/workflows/dependabot_pr.yml +++ b/.github/workflows/dependabot_pr.yml @@ -11,7 +11,7 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index 2fe4fb1ac9d48..df785bcc70014 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -13,7 +13,7 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} diff --git a/CHANGELOG.md b/CHANGELOG.md index d374a725c84ee..9cb5d39c1a764 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -89,6 +89,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) - Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) - Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098)) +- Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125)) ### Changed - Add instrumentation in rest and network layer. 
([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) From 2f7969affddcd4aff41e07b2761d256373667c45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Vl=C4=8Dek?= Date: Wed, 20 Sep 2023 01:25:49 +0200 Subject: [PATCH 17/23] Remove outdated default index mapping error message (#7008) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This error message is no longer relevant and should not be used now. Closes: #7007 Signed-off-by: Lukáš Vlček --- .../main/java/org/opensearch/index/mapper/MapperService.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index 93928a464b138..ecb965061a828 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -172,9 +172,6 @@ public enum MergeReason { ); private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(MapperService.class); - static final String DEFAULT_MAPPING_ERROR_MESSAGE = "[_default_] mappings are not allowed on new indices and should no " - + "longer be used. See [https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking-changes-7.0.html" - + "#default-mapping-not-allowed] for more information."; private final IndexAnalyzers indexAnalyzers; From 9b5bf5fad4ba1463742ba1b4d89cbe81ff2e8fbf Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 20 Sep 2023 14:42:07 -0400 Subject: [PATCH 18/23] Update Github action for Gradle precommit on MacOS (#10140) Signed-off-by: Andriy Redko --- .github/workflows/precommit.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index c628f48e57eba..f4622859916c7 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -22,10 +22,6 @@ jobs: - name: Setup docker (missing on MacOS) if: runner.os == 'macos' run: | - # Workaround for https://github.com/actions/runner-images/issues/8104 - brew remove --ignore-dependencies qemu - curl -o ./qemu.rb https://raw.githubusercontent.com/Homebrew/homebrew-core/f88e30b3a23ef3735580f9b05535ce5a0a03c9e3/Formula/qemu.rb - brew install ./qemu.rb brew install docker colima start sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock From 0c4e95095e4d44540ea8a90aa5ca55fea1248366 Mon Sep 17 00:00:00 2001 From: Darshit Chanpura <35282393+DarshitChanpura@users.noreply.github.com> Date: Wed, 20 Sep 2023 14:49:18 -0400 Subject: [PATCH 19/23] Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 (#9822) * Bumps jetty version to 12.0.1 to fix GMS-2023-1857 Signed-off-by: Darshit Chanpura * Adds this PR to changelog Signed-off-by: Darshit Chanpura * Updates javax websocket server version Signed-off-by: Darshit Chanpura * Updates jetty version to match jetty websocket version Signed-off-by: Darshit Chanpura --------- Signed-off-by: Darshit Chanpura --- CHANGELOG.md | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9cb5d39c1a764..aef0f2ef03496 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 
([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) - Bump `org.bouncycastle:bcpkix-jdk15on` to `org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) - Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) +- Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) ### Changed - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 42de21875d291..5217089cf3b4c 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'opensearch.java' group = 'hdfs' versions << [ - 'jetty': '9.4.51.v20230217' + 'jetty': '9.4.52.v20230823' ] dependencies { From b944194d08aa8635369038c94e9084bf87a3c6aa Mon Sep 17 00:00:00 2001 From: sahil <61558528+buddharajusahil@users.noreply.github.com> Date: Wed, 20 Sep 2023 14:41:44 -0700 Subject: [PATCH 20/23] Search latency tracking - Coordinator node (#8386) We are trying to add search stats to coordinator level node stats. This keeps track of the total time, current requests, and total requests of each request phase. Also added support for general coordinator stats as well on the node level. Signed-off-by: sahil buddharaju Signed-off-by: sahil <61558528+buddharajusahil@users.noreply.github.com> Signed-off-by: Sagar Upadhyaya Signed-off-by: Sagar <99425694+sgup432@users.noreply.github.com> Co-authored-by: sahil buddharaju Co-authored-by: Sagar Upadhyaya Co-authored-by: Sagar Upadhyaya Co-authored-by: Sagar <99425694+sgup432@users.noreply.github.com> --- CHANGELOG.md | 1 + .../search/SearchWeightedRoutingIT.java | 31 ++ .../search/stats/SearchStatsIT.java | 35 ++- .../search/AbstractSearchAsyncAction.java | 35 ++- .../search/CanMatchPreFilterSearchPhase.java | 6 +- .../SearchDfsQueryThenFetchAsyncAction.java | 8 +- .../opensearch/action/search/SearchPhase.java | 12 +- .../action/search/SearchPhaseContext.java | 2 + .../action/search/SearchPhaseName.java | 1 + .../SearchQueryThenFetchAsyncAction.java | 8 +- .../SearchRequestOperationsListener.java | 77 +++++ .../action/search/SearchRequestStats.java | 75 +++++ .../action/search/TransportSearchAction.java | 62 +++- .../common/metrics/CounterMetric.java | 1 + .../opensearch/common/metrics/MeanMetric.java | 1 + .../common/settings/ClusterSettings.java | 1 + .../index/search/stats/SearchStats.java | 136 ++++++++- .../opensearch/indices/IndicesService.java | 7 +- .../opensearch/indices/NodeIndicesStats.java | 7 +- .../main/java/org/opensearch/node/Node.java | 5 + .../cluster/node/stats/NodeStatsTests.java | 4 +- .../AbstractSearchAsyncActionTests.java | 273 +++++++++++++++++- .../CanMatchPreFilterSearchPhaseTests.java | 18 +- .../action/search/MockSearchPhaseContext.java | 5 + .../action/search/SearchAsyncActionTests.java | 16 +- .../SearchQueryThenFetchAsyncActionTests.java | 4 +- .../SearchRequestOperationsListenerTests.java | 69 +++++ .../search/SearchRequestStatsTests.java | 150 ++++++++++ .../index/search/stats/SearchStatsTests.java | 41 ++- .../indices/NodeIndicesStatsTests.java | 6 +- .../snapshots/SnapshotResiliencyTests.java | 4 +- 31 files changed, 1052 insertions(+), 49 deletions(-) create mode 100644 
server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java create mode 100644 server/src/main/java/org/opensearch/action/search/SearchRequestStats.java create mode 100644 server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java create mode 100644 server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index aef0f2ef03496..10205ae20c001 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -78,6 +78,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- Add coordinator level stats for search latency ([#8386](https://github.com/opensearch-project/OpenSearch/issues/8386)) - Add metrics for thread_pool task wait time ([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681)) - Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694)) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java index 6fafdb0912470..5207dab83f1d9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java @@ -16,6 +16,7 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.get.MultiGetResponse; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchPhaseName; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; @@ -56,9 +57,11 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.opensearch.action.search.TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED_KEY; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, minNumDataNodes = 3) @@ -74,6 +77,7 @@ public void testSearchWithWRRShardRouting() throws IOException { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone" + ".values", "a,b,c") .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone") .put("cluster.routing.weighted.fail_open", false) + .put(SEARCH_REQUEST_STATS_ENABLED_KEY, true) .build(); logger.info("--> starting 6 nodes on different zones"); @@ -180,12 +184,39 @@ public void testSearchWithWRRShardRouting() throws IOException { assertFalse(!hitNodes.contains(nodeId)); } nodeStats = client().admin().cluster().prepareNodesStats().execute().actionGet(); + int num = 0; + int coordNumber = 0; for (NodeStats stat : nodeStats.getNodes()) { SearchStats.Stats searchStats = stat.getIndices().getSearch().getTotal(); + if (searchStats.getRequestStatsLongHolder() + .getRequestStatsHolder() + .get(SearchPhaseName.QUERY.getName()) + .getTimeInMillis() > 0) { + assertThat( + searchStats.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.QUERY.getName()).getTotal(), + greaterThan(0L) + ); + assertThat( + 
searchStats.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTimeInMillis(), + greaterThan(0L) + ); + assertThat( + searchStats.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTotal(), + greaterThan(0L) + ); + assertThat( + searchStats.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.EXPAND.getName()).getTotal(), + greaterThan(0L) + ); + coordNumber += 1; + } Assert.assertTrue(searchStats.getQueryCount() > 0L); Assert.assertTrue(searchStats.getFetchCount() > 0L); + num++; } + assertThat(coordNumber, greaterThan(0)); + assertThat(num, greaterThan(0)); } private Map> setupCluster(int nodeCountPerAZ, Settings commonSettings) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java index 23d48b173a3db..253a8b2b14824 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java @@ -37,6 +37,7 @@ import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.search.SearchPhaseName; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.GroupShardsIterator; @@ -63,6 +64,7 @@ import java.util.Set; import java.util.function.Function; +import static org.opensearch.action.search.TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED_KEY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; @@ -78,7 +80,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -@OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, minNumDataNodes = 2) public class SearchStatsIT extends ParameterizedOpenSearchIntegTestCase { public SearchStatsIT(Settings dynamicSettings) { @@ -126,6 +128,11 @@ public void testSimpleStats() throws Exception { assertThat(numNodes, greaterThanOrEqualTo(2)); final int shardsIdx1 = randomIntBetween(1, 10); // we make sure each node gets at least a single shard... 
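    // The assertions below read the new coordinator-side stats through
    // SearchStats.Stats#getRequestStatsLongHolder(). Judging only from the
    // getters these tests call, the holder keeps one entry per search phase,
    // keyed by SearchPhaseName; anything beyond those getters is paraphrase:
    //
    //   total.getRequestStatsLongHolder().getRequestStatsHolder()  // Map keyed by phase name
    //        .get(SearchPhaseName.QUERY.getName())                 // one holder per phase
    //        .getTotal();                 // requests that completed the phase
    //   ...and, on the same per-phase holder, getTimeInMillis() gives the
    //   cumulative time spent in that phase.
    //
    // Collection is off by default; these tests switch it on via the dynamic
    // setting behind TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED_KEY.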
final int shardsIdx2 = Math.max(numNodes - shardsIdx1, randomIntBetween(1, 10));
+ client().admin()
+     .cluster()
+     .prepareUpdateSettings()
+     .setPersistentSettings(Settings.builder().put(SEARCH_REQUEST_STATS_ENABLED_KEY, true).build())
+     .get();
assertThat(numNodes, lessThanOrEqualTo(shardsIdx1 + shardsIdx2));
assertAcked(
prepareCreate("test1").setSettings(
@@ -188,20 +195,40 @@ public void testSimpleStats() throws Exception {
Set<String> nodeIdsWithIndex = nodeIdsWithIndex("test1", "test2");
int num = 0;
+ int numOfCoordinators = 0;
+
for (NodeStats stat : nodeStats.getNodes()) {
Stats total = stat.getIndices().getSearch().getTotal();
+ if (total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.QUERY.getName()).getTimeInMillis() > 0) {
+     assertThat(
+         total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTimeInMillis(),
+         greaterThan(0L)
+     );
+     assertEquals(
+         iters,
+         total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTotal()
+     );
+     assertEquals(
+         iters,
+         total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.EXPAND.getName()).getTotal()
+     );
+     assertEquals(
+         iters,
+         total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.QUERY.getName()).getTotal()
+     );
+     numOfCoordinators += 1;
+ }
if (nodeIdsWithIndex.contains(stat.getNode().getId())) {
assertThat(total.getQueryCount(), greaterThan(0L));
assertThat(total.getQueryTimeInMillis(), greaterThan(0L));
num++;
} else {
- assertThat(total.getQueryCount(), equalTo(0L));
+ assertThat(total.getQueryCount(), greaterThanOrEqualTo(0L));
assertThat(total.getQueryTimeInMillis(), equalTo(0L));
}
}
-
+ assertThat(numOfCoordinators, greaterThan(0));
assertThat(num, greaterThan(0));
-
}

private Set<String> nodeIdsWithIndex(String...
indices) { diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index ee8aa10577956..1c0a1280ad550 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -65,6 +65,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; @@ -107,7 +108,6 @@ abstract class AbstractSearchAsyncAction exten private final AtomicInteger skippedOps = new AtomicInteger(); private final TransportSearchAction.SearchTimeProvider timeProvider; private final SearchResponse.Clusters clusters; - protected final GroupShardsIterator toSkipShardsIts; protected final GroupShardsIterator shardsIts; private final int expectedTotalOps; @@ -116,8 +116,12 @@ abstract class AbstractSearchAsyncAction exten private final Map pendingExecutionsPerNode = new ConcurrentHashMap<>(); private final boolean throttleConcurrentRequests; + private SearchPhase currentPhase; + private final List releasables = new ArrayList<>(); + private Optional searchRequestOperationsListener; + AbstractSearchAsyncAction( String name, Logger logger, @@ -135,7 +139,8 @@ abstract class AbstractSearchAsyncAction exten SearchTask task, SearchPhaseResults resultConsumer, int maxConcurrentRequestsPerNode, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ) { super(name); final List toSkipIterators = new ArrayList<>(); @@ -171,6 +176,7 @@ abstract class AbstractSearchAsyncAction exten this.indexRoutings = indexRoutings; this.results = resultConsumer; this.clusters = clusters; + this.searchRequestOperationsListener = Optional.ofNullable(searchRequestOperationsListener); } @Override @@ -371,6 +377,7 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha : OpenSearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; logger.debug(() -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()), cause); onPhaseFailure(currentPhase, "all shards failed", cause); + } else { Boolean allowPartialResults = request.allowPartialSearchResults(); assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults"; @@ -419,13 +426,24 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha clusterState.version() ); } + onPhaseEnd(); executePhase(nextPhase); } } + private void onPhaseEnd() { + this.searchRequestOperationsListener.ifPresent(searchRequestOperations -> { searchRequestOperations.onPhaseEnd(this); }); + } + + private void onPhaseStart(SearchPhase phase) { + setCurrentPhase(phase); + this.searchRequestOperationsListener.ifPresent(searchRequestOperations -> { searchRequestOperations.onPhaseStart(this); }); + } + private void executePhase(SearchPhase phase) { try { - phase.run(); + onPhaseStart(phase); + phase.recordAndRun(); } catch (Exception e) { if (logger.isDebugEnabled()) { logger.debug(new ParameterizedMessage("Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), e); @@ -603,6 +621,14 @@ private void successfulShardExecution(SearchShardIterator shardsIt) { } } + public SearchPhase getCurrentPhase() { + return currentPhase; + } + + private 
void setCurrentPhase(SearchPhase phase) { + currentPhase = phase; + } + @Override public final int getNumShards() { return results.getNumShards(); @@ -670,10 +696,13 @@ public void sendSearchResponse(InternalSearchResponse internalSearchResponse, At } listener.onResponse(buildSearchResponse(internalSearchResponse, failures, scrollId, searchContextId)); } + onPhaseEnd(); + setCurrentPhase(null); } @Override public final void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) { + this.searchRequestOperationsListener.ifPresent(searchRequestOperations -> searchRequestOperations.onPhaseFailure(this)); raisePhaseFailure(new SearchPhaseExecutionException(phase.getName(), msg, cause, buildShardFailures())); } diff --git a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java index 6c3ee652de2de..ae481736ad0aa 100644 --- a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java @@ -90,7 +90,8 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction, SearchPhase> phaseFactory, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ) { // We set max concurrent shard requests to the number of shards so no throttling happens for can_match requests super( @@ -110,7 +111,8 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction(shardsIts.size()), request.getMaxConcurrentShardRequests(), - clusters + clusters, + searchRequestOperationsListener ); this.queryPhaseResultConsumer = queryPhaseResultConsumer; this.searchPhaseController = searchPhaseController; diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhase.java b/server/src/main/java/org/opensearch/action/search/SearchPhase.java index 50b0cd8e01c1d..1c7b3c1f1563c 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhase.java @@ -42,13 +42,23 @@ * * @opensearch.internal */ -abstract class SearchPhase implements CheckedRunnable { +public abstract class SearchPhase implements CheckedRunnable { private final String name; + private long startTimeInNanos; protected SearchPhase(String name) { this.name = Objects.requireNonNull(name, "name must not be null"); } + public long getStartTimeInNanos() { + return startTimeInNanos; + } + + public void recordAndRun() throws IOException { + this.startTimeInNanos = System.nanoTime(); + run(); + } + /** * Returns the phases name. */ diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java index 4ffd5521793f6..45d39a6f85ea2 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java @@ -73,6 +73,8 @@ public interface SearchPhaseContext extends Executor { */ SearchRequest getRequest(); + SearchPhase getCurrentPhase(); + /** * Builds and sends the final search response back to the user. 
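A minimal sketch of the timing contract these hunks introduce, where phase stands for any SearchPhase: recordAndRun() stamps the start time immediately before the phase body executes, and a listener later derives the elapsed time from that stamp once the coordinator moves past the phase.

    long startTimeInNanos = System.nanoTime(); // what recordAndRun() stores
    phase.run();                               // the phase body itself
    // what SearchRequestStats.onPhaseEnd() (below) computes from the stamp:
    long tookMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos);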
* diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java index b6f842cf2cce1..4c0fe3ac06326 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java @@ -13,6 +13,7 @@ * @opensearch.internal */ public enum SearchPhaseName { + DFS_PRE_QUERY("dfs_pre_query"), QUERY("query"), FETCH("fetch"), DFS_QUERY("dfs_query"), diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java index f75ab2554e693..ca5ad087d3089 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -81,10 +81,11 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction listeners; + private final Logger logger; + + public CompositeListener(List listeners, Logger logger) { + this.listeners = listeners; + this.logger = logger; + } + + @Override + public void onPhaseStart(SearchPhaseContext context) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onPhaseStart(context); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onPhaseStart listener [{}] failed", listener), e); + } + } + } + + @Override + public void onPhaseEnd(SearchPhaseContext context) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onPhaseEnd(context); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onPhaseEnd listener [{}] failed", listener), e); + } + } + } + + @Override + public void onPhaseFailure(SearchPhaseContext context) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onPhaseFailure(context); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onPhaseFailure listener [{}] failed", listener), e); + } + } + } + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java new file mode 100644 index 0000000000000..ad299c11b987d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
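A hedged sketch of how these listeners get wired in: TransportSearchAction (further below) builds a CompositeListener only when at least one listener is registered and otherwise passes null, which AbstractSearchAsyncAction tolerates by wrapping the argument in Optional.ofNullable(). The per-listener try/catch inside CompositeListener above means a misbehaving listener is logged rather than failing the search.

    final List<SearchRequestOperationsListener> listeners = new ArrayList<>();
    if (isRequestStatsEnabled) {
        listeners.add(searchRequestStats); // the one listener this patch registers
    }
    final SearchRequestOperationsListener listener = CollectionUtils.isEmpty(listeners)
        ? null
        : new SearchRequestOperationsListener.CompositeListener(listeners, logger);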
+ */ + +package org.opensearch.action.search; + +import org.opensearch.common.inject.Inject; +import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.metrics.MeanMetric; + +import java.util.EnumMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * Request level search stats to track coordinator level node search latencies + * + * @opensearch.internal + */ +public final class SearchRequestStats implements SearchRequestOperationsListener { + Map phaseStatsMap = new EnumMap<>(SearchPhaseName.class); + + @Inject + public SearchRequestStats() { + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + phaseStatsMap.put(searchPhaseName, new StatsHolder()); + } + } + + public long getPhaseCurrent(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName).current.count(); + } + + public long getPhaseTotal(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName).total.count(); + } + + public long getPhaseMetric(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName).timing.sum(); + } + + @Override + public void onPhaseStart(SearchPhaseContext context) { + phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc(); + } + + @Override + public void onPhaseEnd(SearchPhaseContext context) { + StatsHolder phaseStats = phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()); + phaseStats.current.dec(); + phaseStats.total.inc(); + phaseStats.timing.inc(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - context.getCurrentPhase().getStartTimeInNanos())); + } + + @Override + public void onPhaseFailure(SearchPhaseContext context) { + phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); + } + + /** + * Holder of statistics values + * + * @opensearch.internal + */ + + public static final class StatsHolder { + CounterMetric current = new CounterMetric(); + CounterMetric total = new CounterMetric(); + MeanMetric timing = new MeanMetric(); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 25ec0fc57d19f..cff1005beff27 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -67,6 +67,7 @@ import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.indices.breaker.CircuitBreakerService; @@ -145,6 +146,14 @@ public class TransportSearchAction extends HandledTransportAction SEARCH_REQUEST_STATS_ENABLED = Setting.boolSetting( + SEARCH_REQUEST_STATS_ENABLED_KEY, + false, + Property.Dynamic, + Property.NodeScope + ); + private final NodeClient client; private final ThreadPool threadPool; private final ClusterService clusterService; @@ -157,6 +166,10 @@ public class TransportSearchAction extends HandledTransportAction) SearchRequest::new); this.client = client; @@ -185,6 +199,13 @@ public TransportSearchAction( this.indexNameExpressionResolver = indexNameExpressionResolver; this.namedWriteableRegistry = namedWriteableRegistry; this.searchPipelineService = searchPipelineService; + this.isRequestStatsEnabled 
= clusterService.getClusterSettings().get(SEARCH_REQUEST_STATS_ENABLED); + clusterService.getClusterSettings().addSettingsUpdateConsumer(SEARCH_REQUEST_STATS_ENABLED, this::setIsRequestStatsEnabled); + this.searchRequestStats = searchRequestStats; + } + + private void setIsRequestStatsEnabled(boolean isRequestStatsEnabled) { + this.isRequestStatsEnabled = isRequestStatsEnabled; } private Map buildPerIndexAliasFilter( @@ -311,6 +332,13 @@ public void executeRequest( SinglePhaseSearchAction phaseSearchAction, ActionListener listener ) { + final List searchListenersList = createSearchListenerList(); + final SearchRequestOperationsListener searchRequestOperationsListener; + if (!CollectionUtils.isEmpty(searchListenersList)) { + searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); + } else { + searchRequestOperationsListener = null; + } executeRequest(task, searchRequest, new SearchAsyncActionProvider() { @Override public AbstractSearchAsyncAction asyncSearchAction( @@ -346,7 +374,8 @@ public AbstractSearchAsyncAction asyncSearchAction( task, new ArraySearchPhaseResults<>(shardsIts.size()), searchRequest.getMaxConcurrentShardRequests(), - clusters + clusters, + searchRequestOperationsListener ) { @Override protected void executePhaseOnShard( @@ -916,9 +945,7 @@ private void executeSearch( @Nullable SearchContextId searchContext, SearchAsyncActionProvider searchAsyncActionProvider ) { - clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); - // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead // of just for the _search api @@ -968,11 +995,8 @@ private void executeSearch( indexRoutings = routingMap; } final GroupShardsIterator shardIterators = mergeShardsIterators(localShardIterators, remoteShardIterators); - failIfOverShardCountLimit(clusterService, shardIterators.size()); - Map concreteIndexBoosts = resolveIndexBoosts(searchRequest, clusterState); - // optimize search type for cases where there is only one shard group to search on if (shardIterators.size() == 1) { // if we only have one group, then we always want Q_T_F, no need for DFS, and no need to do THEN since we hit one shard @@ -1107,6 +1131,14 @@ AbstractSearchAsyncAction asyncSearchAction( ); } + private List createSearchListenerList() { + final List searchListenersList = new ArrayList<>(); + if (isRequestStatsEnabled) { + searchListenersList.add(searchRequestStats); + } + return searchListenersList; + } + private AbstractSearchAsyncAction searchAsyncAction( SearchTask task, SearchRequest searchRequest, @@ -1123,6 +1155,13 @@ private AbstractSearchAsyncAction searchAsyncAction ThreadPool threadPool, SearchResponse.Clusters clusters ) { + final List searchListenersList = createSearchListenerList(); + final SearchRequestOperationsListener searchRequestOperationsListener; + if (!CollectionUtils.isEmpty(searchListenersList)) { + searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); + } else { + searchRequestOperationsListener = null; + } if (preFilter) { return new CanMatchPreFilterSearchPhase( logger, @@ -1162,7 +1201,8 @@ public void run() { } }; }, - clusters + clusters, + searchRequestOperationsListener ); } else { final QueryPhaseResultConsumer queryResultConsumer = searchPhaseController.newSearchPhaseResults( @@ -1192,7 
+1232,8 @@ public void run() { timeProvider, clusterState, task, - clusters + clusters, + searchRequestOperationsListener ); break; case QUERY_THEN_FETCH: @@ -1212,7 +1253,8 @@ public void run() { timeProvider, clusterState, task, - clusters + clusters, + searchRequestOperationsListener ); break; default: diff --git a/server/src/main/java/org/opensearch/common/metrics/CounterMetric.java b/server/src/main/java/org/opensearch/common/metrics/CounterMetric.java index 5c48c1f772ff0..cb181840406a5 100644 --- a/server/src/main/java/org/opensearch/common/metrics/CounterMetric.java +++ b/server/src/main/java/org/opensearch/common/metrics/CounterMetric.java @@ -62,4 +62,5 @@ public void dec(long n) { public long count() { return counter.sum(); } + } diff --git a/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java b/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java index 33f12c8cb42d3..359facdce633b 100644 --- a/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java +++ b/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java @@ -79,4 +79,5 @@ public void clear() { counter.reset(); sum.reset(); } + } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 74224d66400da..22e65b2c04668 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -373,6 +373,7 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, TransportSearchAction.SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING, + TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED, RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE, SniffConnectionStrategy.REMOTE_CONNECTIONS_PER_CLUSTER, RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index 76b216304b3b7..14aaf7e58a59c 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -33,6 +33,8 @@ package org.opensearch.index.search.stats; import org.opensearch.Version; +import org.opensearch.action.search.SearchPhaseName; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.common.Nullable; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; @@ -58,10 +60,79 @@ public class SearchStats implements Writeable, ToXContentFragment { /** - * Statistics for search + * Holds statistic values for a particular phase. 
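The dynamic-setting plumbing above, collected into one sketch (the Setting<Boolean> generic is restored here; the literal value behind SEARCH_REQUEST_STATS_ENABLED_KEY is defined in TransportSearchAction): the setting is registered in ClusterSettings, the constructor seeds a cached flag, and a settings-update consumer keeps that flag in sync so stats collection can be toggled without a restart.

    public static final Setting<Boolean> SEARCH_REQUEST_STATS_ENABLED = Setting.boolSetting(
        SEARCH_REQUEST_STATS_ENABLED_KEY,
        false,             // disabled by default
        Property.Dynamic,  // may be flipped at runtime
        Property.NodeScope
    );

    // in the constructor:
    this.isRequestStatsEnabled = clusterService.getClusterSettings().get(SEARCH_REQUEST_STATS_ENABLED);
    clusterService.getClusterSettings().addSettingsUpdateConsumer(SEARCH_REQUEST_STATS_ENABLED, this::setIsRequestStatsEnabled);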
* * @opensearch.internal */ + public static class PhaseStatsLongHolder implements Writeable { + + long current; + long total; + long timeInMillis; + + public long getCurrent() { + return current; + } + + public long getTotal() { + return total; + } + + public long getTimeInMillis() { + return timeInMillis; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(current); + out.writeVLong(total); + out.writeVLong(timeInMillis); + } + + PhaseStatsLongHolder() { + this(0, 0, 0); + } + + PhaseStatsLongHolder(long current, long total, long timeInMillis) { + this.current = current; + this.total = total; + this.timeInMillis = timeInMillis; + } + + PhaseStatsLongHolder(StreamInput in) throws IOException { + this.current = in.readVLong(); + this.total = in.readVLong(); + this.timeInMillis = in.readVLong(); + } + + } + + /** + * Holds requests stats for different phases. + * + * @opensearch.internal + */ + public static class RequestStatsLongHolder { + + Map requestStatsHolder = new HashMap<>(); + + public Map getRequestStatsHolder() { + return requestStatsHolder; + } + + RequestStatsLongHolder() { + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + requestStatsHolder.put(searchPhaseName.getName(), new PhaseStatsLongHolder()); + } + } + } + + /** + * Holder of statistics values + * + * @opensearch.internal + */ + public static class Stats implements Writeable, ToXContentFragment { private long queryCount; @@ -89,6 +160,13 @@ public static class Stats implements Writeable, ToXContentFragment { private long pitTimeInMillis; private long pitCurrent; + @Nullable + private RequestStatsLongHolder requestStatsLongHolder; + + public RequestStatsLongHolder getRequestStatsLongHolder() { + return requestStatsLongHolder; + } + private Stats() { // for internal use, initializes all counts to 0 } @@ -114,6 +192,7 @@ public Stats( long suggestTimeInMillis, long suggestCurrent ) { + this.requestStatsLongHolder = new RequestStatsLongHolder(); this.queryCount = queryCount; this.queryTimeInMillis = queryTimeInMillis; this.queryCurrent = queryCurrent; @@ -163,6 +242,10 @@ private Stats(StreamInput in) throws IOException { pitCurrent = in.readVLong(); } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + this.requestStatsLongHolder = new RequestStatsLongHolder(); + requestStatsLongHolder.requestStatsHolder = in.readMap(StreamInput::readString, PhaseStatsLongHolder::new); + } if (in.getVersion().onOrAfter(Version.V_2_10_0)) { concurrentQueryCount = in.readVLong(); concurrentQueryTimeInMillis = in.readVLong(); @@ -354,6 +437,17 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(pitCurrent); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (requestStatsLongHolder == null) { + requestStatsLongHolder = new RequestStatsLongHolder(); + } + out.writeMap( + requestStatsLongHolder.getRequestStatsHolder(), + StreamOutput::writeString, + (stream, stats) -> stats.writeTo(stream) + ); + } + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { out.writeVLong(concurrentQueryCount); out.writeVLong(concurrentQueryTimeInMillis); @@ -391,6 +485,22 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.SUGGEST_TIME_IN_MILLIS, Fields.SUGGEST_TIME, getSuggestTime()); builder.field(Fields.SUGGEST_CURRENT, suggestCurrent); + if (requestStatsLongHolder != null) { + builder.startObject(Fields.REQUEST); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + 
PhaseStatsLongHolder statsLongHolder = requestStatsLongHolder.requestStatsHolder.get(searchPhaseName.getName()); + if (statsLongHolder == null) { + continue; + } + builder.startObject(searchPhaseName.getName()); + builder.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, new TimeValue(statsLongHolder.timeInMillis)); + builder.field(Fields.CURRENT, statsLongHolder.current); + builder.field(Fields.TOTAL, statsLongHolder.total); + builder.endObject(); + } + builder.endObject(); + } return builder; } } @@ -405,6 +515,24 @@ public SearchStats() { totalStats = new Stats(); } + // Set the different Request Stats fields in here + public void setSearchRequestStats(SearchRequestStats searchRequestStats) { + if (totalStats.requestStatsLongHolder == null) { + totalStats.requestStatsLongHolder = new RequestStatsLongHolder(); + } + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + totalStats.requestStatsLongHolder.requestStatsHolder.put( + searchPhaseName.getName(), + new PhaseStatsLongHolder( + searchRequestStats.getPhaseCurrent(searchPhaseName), + searchRequestStats.getPhaseTotal(searchPhaseName), + searchRequestStats.getPhaseMetric(searchPhaseName) + ) + ); + } + } + public SearchStats(Stats totalStats, long openContexts, @Nullable Map groupStats) { this.totalStats = totalStats; this.openContexts = openContexts; @@ -520,6 +648,12 @@ static final class Fields { static final String SUGGEST_TIME = "suggest_time"; static final String SUGGEST_TIME_IN_MILLIS = "suggest_time_in_millis"; static final String SUGGEST_CURRENT = "suggest_current"; + static final String REQUEST = "request"; + static final String TIME_IN_MILLIS = "time_in_millis"; + static final String TIME = "time"; + static final String CURRENT = "current"; + static final String TOTAL = "total"; + } @Override diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 2ed3cb8d9e8ea..cf64b886ed523 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -47,6 +47,7 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags.Flag; import org.opensearch.action.admin.indices.stats.IndexShardStats; import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.action.search.SearchType; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; @@ -333,6 +334,8 @@ public class IndicesService extends AbstractLifecycleComponent private volatile TimeValue clusterRemoteTranslogBufferInterval; private final FileCacheCleaner fileCacheCleaner; + private final SearchRequestStats searchRequestStats; + @Override protected void doStart() { // Start thread that will manage cleaning the field data cache periodically @@ -363,6 +366,7 @@ public IndicesService( IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, Supplier repositoriesServiceSupplier, FileCacheCleaner fileCacheCleaner, + SearchRequestStats searchRequestStats, @Nullable RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) { this.settings = settings; @@ -453,6 +457,7 @@ protected void closeInternal() { clusterService.getClusterSettings().addSettingsUpdateConsumer(ALLOW_EXPENSIVE_QUERIES, this::setAllowExpensiveQueries); this.remoteDirectoryFactory = remoteDirectoryFactory; this.translogFactorySupplier = getTranslogFactorySupplier(repositoriesServiceSupplier, threadPool, 
remoteStoreStatsTrackerFactory); + this.searchRequestStats = searchRequestStats; this.clusterDefaultRefreshInterval = CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING, this::onRefreshIntervalUpdate); @@ -576,7 +581,7 @@ public NodeIndicesStats stats(CommonStatsFlags flags) { } } - return new NodeIndicesStats(commonStats, statsByShard(this, flags)); + return new NodeIndicesStats(commonStats, statsByShard(this, flags), searchRequestStats); } Map> statsByShard(final IndicesService indicesService, final CommonStatsFlags flags) { diff --git a/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java b/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java index cc3d8193dfa6b..8a7aaba2726f4 100644 --- a/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java +++ b/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.action.admin.indices.stats.IndexShardStats; import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.common.Nullable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -71,7 +72,6 @@ * @opensearch.internal */ public class NodeIndicesStats implements Writeable, ToXContentFragment { - private CommonStats stats; private Map> statsByShard; @@ -92,7 +92,7 @@ public NodeIndicesStats(StreamInput in) throws IOException { } } - public NodeIndicesStats(CommonStats oldStats, Map> statsByShard) { + public NodeIndicesStats(CommonStats oldStats, Map> statsByShard, SearchRequestStats searchRequestStats) { // this.stats = stats; this.statsByShard = statsByShard; @@ -105,6 +105,9 @@ public NodeIndicesStats(CommonStats oldStats, Map> } } } + if (this.stats.search != null) { + this.stats.search.setSearchRequestStats(searchRequestStats); + } } @Nullable diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 1f8f17f8e8d91..90fb339951d62 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -46,6 +46,7 @@ import org.opensearch.action.admin.cluster.snapshots.status.TransportNodesSnapshotsStatus; import org.opensearch.action.search.SearchExecutionStatsCollector; import org.opensearch.action.search.SearchPhaseController; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.action.search.SearchTransportService; import org.opensearch.action.support.TransportAction; import org.opensearch.action.update.UpdateHelper; @@ -761,6 +762,8 @@ protected Node( threadPool ); + final SearchRequestStats searchRequestStats = new SearchRequestStats(); + remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, settings); final IndicesService indicesService = new IndicesService( settings, @@ -786,6 +789,7 @@ protected Node( remoteDirectoryFactory, repositoriesServiceReference::get, fileCacheCleaner, + searchRequestStats, remoteStoreStatsTrackerFactory ); @@ -1199,6 +1203,7 @@ protected Node( b.bind(SystemIndices.class).toInstance(systemIndices); b.bind(IdentityService.class).toInstance(identityService); b.bind(Tracer.class).toInstance(tracer); + b.bind(SearchRequestStats.class).toInstance(searchRequestStats); 
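A condensed view of the wiring shown above: a single SearchRequestStats instance per node serves both sides. Listener callbacks mutate its counters while searches run; node stats collection copies them out. The calls below are the ones this patch adds, shown together even though they live in Node.java and IndicesService respectively.

    final SearchRequestStats searchRequestStats = new SearchRequestStats();
    // write side: bound as a singleton, so TransportSearchAction's @Inject
    // constructor receives it and registers it as the request-stats listener
    b.bind(SearchRequestStats.class).toInstance(searchRequestStats);
    // read side: IndicesService hands the same instance to NodeIndicesStats,
    // which copies the counters into SearchStats via setSearchRequestStats()
    new NodeIndicesStats(commonStats, statsByShard(this, flags), searchRequestStats);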
b.bind(RemoteClusterStateService.class).toProvider(() -> remoteClusterStateService); b.bind(PersistedStateRegistry.class).toInstance(persistedStateRegistry); }); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index 672ebea8d01f9..e3f16463a5328 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.cluster.coordination.PendingClusterStateStats; import org.opensearch.cluster.coordination.PublishClusterStateStats; import org.opensearch.cluster.node.DiscoveryNode; @@ -801,8 +802,7 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { private static NodeIndicesStats getNodeIndicesStats(boolean remoteStoreStats) { NodeIndicesStats indicesStats = null; if (remoteStoreStats) { - indicesStats = new NodeIndicesStats(new CommonStats(CommonStatsFlags.ALL), new HashMap<>()); - + indicesStats = new NodeIndicesStats(new CommonStats(CommonStatsFlags.ALL), new HashMap<>(), new SearchRequestStats()); RemoteSegmentStats remoteSegmentStats = indicesStats.getSegments().getRemoteSegmentStats(); remoteSegmentStats.addUploadBytesStarted(10L); remoteSegmentStats.addUploadBytesSucceeded(10L); diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java index 705479ec21fc1..f628bb3201452 100644 --- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java @@ -38,8 +38,12 @@ import org.opensearch.cluster.routing.GroupShardsIterator; import org.opensearch.common.UUIDs; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.set.Sets; import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.query.MatchAllQueryBuilder; @@ -51,7 +55,10 @@ import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; import org.junit.After; import org.junit.Before; @@ -65,6 +72,7 @@ import java.util.UUID; import java.util.concurrent.CopyOnWriteArraySet; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; @@ -77,18 +85,21 @@ import static org.hamcrest.Matchers.equalTo; import static 
org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; public class AbstractSearchAsyncActionTests extends OpenSearchTestCase { private final List> resolvedNodes = new ArrayList<>(); private final Set releasedContexts = new CopyOnWriteArraySet<>(); private ExecutorService executor; + ThreadPool threadPool; @Before @Override public void setUp() throws Exception { super.setUp(); executor = Executors.newFixedThreadPool(1); + threadPool = new TestThreadPool(getClass().getName()); } @After @@ -97,6 +108,7 @@ public void tearDown() throws Exception { super.tearDown(); executor.shutdown(); assertTrue(executor.awaitTermination(1, TimeUnit.SECONDS)); + ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); } private AbstractSearchAsyncAction createAction( @@ -126,6 +138,7 @@ private AbstractSearchAsyncAction createAction( final AtomicLong expected, final SearchShardIterator... shards ) { + final Runnable runnable; final TransportSearchAction.SearchTimeProvider timeProvider; if (controlled) { @@ -161,7 +174,8 @@ private AbstractSearchAsyncAction createAction( null, results, request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { @Override protected SearchPhase getNextPhase(final SearchPhaseResults results, SearchPhaseContext context) { @@ -313,6 +327,53 @@ public void testSendSearchResponseDisallowPartialFailures() { assertEquals(requestIds, releasedContexts); } + public void testOnPhaseFailureAndVerifyListeners() { + SearchRequestStats testListener = new SearchRequestStats(); + + final List requestOperationListeners = new ArrayList<>(List.of(testListener)); + SearchQueryThenFetchAsyncAction action = createSearchQueryThenFetchAsyncAction(requestOperationListeners); + action.start(); + assertEquals(1, testListener.getPhaseCurrent(action.getSearchPhaseName())); + action.onPhaseFailure(new SearchPhase("test") { + @Override + public void run() { + + } + }, "message", null); + assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseTotal(action.getSearchPhaseName())); + + SearchDfsQueryThenFetchAsyncAction searchDfsQueryThenFetchAsyncAction = createSearchDfsQueryThenFetchAsyncAction( + requestOperationListeners + ); + searchDfsQueryThenFetchAsyncAction.start(); + assertEquals(1, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + searchDfsQueryThenFetchAsyncAction.onPhaseFailure(new SearchPhase("test") { + @Override + public void run() { + + } + }, "message", null); + assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseTotal(action.getSearchPhaseName())); + + FetchSearchPhase fetchPhase = createFetchSearchPhase(); + ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), randomInt()); + SearchShardIterator searchShardIterator = new SearchShardIterator(null, shardId, Collections.emptyList(), OriginalIndices.NONE); + searchShardIterator.resetAndSkip(); + action.skipShard(searchShardIterator); + action.executeNextPhase(action, fetchPhase); + assertEquals(1, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + action.onPhaseFailure(new SearchPhase("test") { + @Override + public void run() { + + } + }, "message", null); + assertEquals(0, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + assertEquals(0, 
testListener.getPhaseTotal(fetchPhase.getSearchPhaseName())); + } + public void testOnPhaseFailure() { SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false); AtomicReference exception = new AtomicReference<>(); @@ -321,6 +382,7 @@ public void testOnPhaseFailure() { List> nodeLookups = new ArrayList<>(); ArraySearchPhaseResults phaseResults = phaseResults(requestIds, nodeLookups, 0); AbstractSearchAsyncAction action = createAction(searchRequest, phaseResults, listener, false, new AtomicLong()); + action.onPhaseFailure(new SearchPhase("test") { @Override public void run() { @@ -528,6 +590,215 @@ public void onFailure(Exception e) { assertThat(searchResponse.getSuccessfulShards(), equalTo(shards.length)); } + public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedException { + SearchRequestStats testListener = new SearchRequestStats(); + final List requestOperationListeners = new ArrayList<>(List.of(testListener)); + + long delay = (randomIntBetween(1, 5)); + delay = delay * 10; + + SearchQueryThenFetchAsyncAction action = createSearchQueryThenFetchAsyncAction(requestOperationListeners); + action.start(); + + // Verify queryPhase current metric + assertEquals(1, testListener.getPhaseCurrent(action.getSearchPhaseName())); + TimeUnit.MILLISECONDS.sleep(delay); + + FetchSearchPhase fetchPhase = createFetchSearchPhase(); + ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), randomInt()); + SearchShardIterator searchShardIterator = new SearchShardIterator(null, shardId, Collections.emptyList(), OriginalIndices.NONE); + searchShardIterator.resetAndSkip(); + action.skipShard(searchShardIterator); + action.executeNextPhase(action, fetchPhase); + + // Verify queryPhase total, current and latency metrics + assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseName())); + assertThat(testListener.getPhaseMetric(action.getSearchPhaseName()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(action.getSearchPhaseName())); + + // Verify fetchPhase current metric + assertEquals(1, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + TimeUnit.MILLISECONDS.sleep(delay); + + ExpandSearchPhase expandPhase = createExpandSearchPhase(); + action.executeNextPhase(fetchPhase, expandPhase); + TimeUnit.MILLISECONDS.sleep(delay); + + // Verify fetchPhase total, current and latency metrics + assertThat(testListener.getPhaseMetric(fetchPhase.getSearchPhaseName()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(fetchPhase.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + + assertEquals(1, testListener.getPhaseCurrent(expandPhase.getSearchPhaseName())); + + action.executeNextPhase(expandPhase, fetchPhase); + + action.sendSearchResponse(mock(InternalSearchResponse.class), mock(String.valueOf(QuerySearchResult.class))); + assertThat(testListener.getPhaseMetric(expandPhase.getSearchPhaseName()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(expandPhase.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseCurrent(expandPhase.getSearchPhaseName())); + } + + public void testOnPhaseListenersWithDfsType() throws InterruptedException { + SearchRequestStats testListener = new SearchRequestStats(); + final List requestOperationListeners = new ArrayList<>(List.of(testListener)); + + SearchDfsQueryThenFetchAsyncAction searchDfsQueryThenFetchAsyncAction = 
createSearchDfsQueryThenFetchAsyncAction( + requestOperationListeners + ); + long delay = (randomIntBetween(1, 5)); + + FetchSearchPhase fetchPhase = createFetchSearchPhase(); + searchDfsQueryThenFetchAsyncAction.start(); + assertEquals(1, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + TimeUnit.MILLISECONDS.sleep(delay); + ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), randomInt()); + SearchShardIterator searchShardIterator = new SearchShardIterator(null, shardId, Collections.emptyList(), OriginalIndices.NONE); + searchShardIterator.resetAndSkip(); + + searchDfsQueryThenFetchAsyncAction.skipShard(searchShardIterator); + searchDfsQueryThenFetchAsyncAction.executeNextPhase(searchDfsQueryThenFetchAsyncAction, fetchPhase); + + assertThat(testListener.getPhaseMetric(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + } + + private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAction( + List searchRequestOperationsListeners + ) { + SearchPhaseController controller = new SearchPhaseController( + writableRegistry(), + r -> InternalAggregationTestCase.emptyReduceContextBuilder() + ); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap()); + Executor executor = OpenSearchExecutors.newDirectExecutorService(); + SearchShardIterator shards = new SearchShardIterator(null, null, Collections.emptyList(), null); + GroupShardsIterator shardsIter = new GroupShardsIterator<>(List.of(shards)); + QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer( + searchRequest, + executor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + controller, + task.getProgressListener(), + writableRegistry(), + shardsIter.size(), + exc -> {} + ); + AtomicReference exception = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); + + return new SearchDfsQueryThenFetchAsyncAction( + logger, + null, + null, + null, + null, + null, + null, + executor, + resultConsumer, + searchRequest, + listener, + shardsIter, + null, + null, + task, + SearchResponse.Clusters.EMPTY, + new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger) + ); + } + + private SearchQueryThenFetchAsyncAction createSearchQueryThenFetchAsyncAction( + List searchRequestOperationsListeners + ) { + SearchPhaseController controller = new SearchPhaseController( + writableRegistry(), + r -> InternalAggregationTestCase.emptyReduceContextBuilder() + ); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap()); + Executor executor = OpenSearchExecutors.newDirectExecutorService(); + SearchShardIterator shards = new SearchShardIterator(null, null, Collections.emptyList(), null); + GroupShardsIterator shardsIter = new GroupShardsIterator<>(List.of(shards)); + QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer( + searchRequest, + executor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + controller, + 
task.getProgressListener(), + writableRegistry(), + shardsIter.size(), + exc -> {} + ); + AtomicReference exception = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); + return new SearchQueryThenFetchAsyncAction( + logger, + null, + null, + null, + null, + null, + null, + executor, + resultConsumer, + searchRequest, + listener, + shardsIter, + null, + null, + task, + SearchResponse.Clusters.EMPTY, + new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger) + ) { + @Override + ShardSearchFailure[] buildShardFailures() { + return ShardSearchFailure.EMPTY_ARRAY; + } + + @Override + public void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray queryResults) { + start(); + } + }; + } + + private FetchSearchPhase createFetchSearchPhase() { + SearchPhaseController controller = new SearchPhaseController( + writableRegistry(), + r -> InternalAggregationTestCase.emptyReduceContextBuilder() + ); + MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); + QueryPhaseResultConsumer results = controller.newSearchPhaseResults( + OpenSearchExecutors.newDirectExecutorService(), + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + SearchProgressListener.NOOP, + mockSearchPhaseContext.getRequest(), + 1, + exc -> {} + ); + return new FetchSearchPhase( + results, + controller, + null, + mockSearchPhaseContext, + (searchResponse, scrollId) -> new SearchPhase("test") { + @Override + public void run() { + mockSearchPhaseContext.sendSearchResponse(searchResponse, null); + } + } + ); + } + + private ExpandSearchPhase createExpandSearchPhase() { + MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(null, null, null, null, false, null, 1); + return new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, null); + } + private static final class PhaseResult extends SearchPhaseResult { PhaseResult(ShardSearchContextId contextId) { this.contextId = contextId; diff --git a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java index 45f00a8418d5c..43029fe57d5dd 100644 --- a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -136,7 +136,8 @@ public void run() throws IOException { latch.countDown(); } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ); canMatchPhase.start(); @@ -227,7 +228,8 @@ public void run() throws IOException { latch.countDown(); } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ); canMatchPhase.start(); @@ -317,7 +319,8 @@ public void sendCanMatch( null, new ArraySearchPhaseResults<>(iter.size()), randomIntBetween(1, 32), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { @Override @@ -344,7 +347,8 @@ protected void executePhaseOnShard( } } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ); canMatchPhase.start(); @@ -428,7 +432,8 @@ public void run() { latch.countDown(); } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ); canMatchPhase.start(); @@ -527,7 +532,8 @@ public void run() { latch.countDown(); } }, - 
SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ); canMatchPhase.start(); diff --git a/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java index e078b4a467e91..b5e1050b968ee 100644 --- a/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java @@ -99,6 +99,11 @@ public SearchRequest getRequest() { return searchRequest; } + @Override + public SearchPhase getCurrentPhase() { + return null; + } + @Override public void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray queryResults) { String scrollId = getRequest().scroll() != null ? TransportSearchHelper.buildScrollId(queryResults, Version.CURRENT) : null; diff --git a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java index 830fa99f90bb9..4b94b6589c6c8 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java @@ -135,7 +135,8 @@ public void testSkipSearchShards() throws InterruptedException { null, new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { @Override @@ -253,7 +254,8 @@ public void testLimitConcurrentShardRequests() throws InterruptedException { null, new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { @Override @@ -370,7 +372,8 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI null, new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { TestSearchResponse response = new TestSearchResponse(); @@ -492,7 +495,8 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI null, new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { TestSearchResponse response = new TestSearchResponse(); @@ -605,9 +609,9 @@ public void testAllowPartialResults() throws InterruptedException { null, new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { - @Override protected void executePhaseOnShard( SearchShardIterator shardIt, diff --git a/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java index 0e2780c195cb8..6a22a7ea2b5e4 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java @@ -214,7 +214,8 @@ public void sendExecuteQuery( timeProvider, null, task, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { @Override protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { @@ -226,6 +227,7 @@ public void run() { }; } }; + 
action.start(); latch.await(); assertThat(successfulOps.get(), equalTo(numShards)); diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java new file mode 100644 index 0000000000000..ef880043e863c --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SearchRequestOperationsListenerTests extends OpenSearchTestCase { + + public void testListenersAreExecuted() { + Map searchPhaseMap = new HashMap<>(); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + searchPhaseMap.put(searchPhaseName, new SearchRequestStats.StatsHolder()); + } + SearchRequestOperationsListener testListener = new SearchRequestOperationsListener() { + + @Override + public void onPhaseStart(SearchPhaseContext context) { + searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc(); + } + + @Override + public void onPhaseEnd(SearchPhaseContext context) { + searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); + searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).total.inc(); + } + + @Override + public void onPhaseFailure(SearchPhaseContext context) { + searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); + } + }; + + int totalListeners = randomIntBetween(1, 10); + final List requestOperationListeners = new ArrayList<>(); + for (int i = 0; i < totalListeners; i++) { + requestOperationListeners.add(testListener); + } + + SearchRequestOperationsListener compositeListener = new SearchRequestOperationsListener.CompositeListener( + requestOperationListeners, + logger + ); + + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase searchPhase = mock(SearchPhase.class); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(ctx.getCurrentPhase()).thenReturn(searchPhase); + when(searchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + compositeListener.onPhaseStart(ctx); + assertEquals(totalListeners, searchPhaseMap.get(searchPhaseName).current.count()); + } + } +} diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java new file mode 100644 index 0000000000000..f24147a8194b4 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java @@ -0,0 +1,150 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
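The unit tests that follow share one Mockito pattern, distilled here as a hedged sketch: mock the SearchPhaseContext and SearchPhase so a listener can be driven through its callbacks without running a real search.

    SearchRequestStats stats = new SearchRequestStats();
    SearchPhaseContext ctx = mock(SearchPhaseContext.class);
    SearchPhase phase = mock(SearchPhase.class);
    when(ctx.getCurrentPhase()).thenReturn(phase);
    when(phase.getSearchPhaseName()).thenReturn(SearchPhaseName.QUERY);

    stats.onPhaseStart(ctx);
    assertEquals(1, stats.getPhaseCurrent(SearchPhaseName.QUERY));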
+ */ + +package org.opensearch.action.search; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Phaser; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SearchRequestStatsTests extends OpenSearchTestCase { + public void testSearchRequestPhaseFailure() { + SearchRequestStats testRequestStats = new SearchRequestStats(); + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + testRequestStats.onPhaseStart(ctx); + assertEquals(1, testRequestStats.getPhaseCurrent(searchPhaseName)); + testRequestStats.onPhaseFailure(ctx); + assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); + } + } + + public void testSearchRequestStats() { + SearchRequestStats testRequestStats = new SearchRequestStats(); + + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + long tookTimeInMillis = randomIntBetween(1, 10); + testRequestStats.onPhaseStart(ctx); + long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis); + when(mockSearchPhase.getStartTimeInNanos()).thenReturn(startTime); + assertEquals(1, testRequestStats.getPhaseCurrent(searchPhaseName)); + testRequestStats.onPhaseEnd(ctx); + assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); + assertEquals(1, testRequestStats.getPhaseTotal(searchPhaseName)); + assertThat(testRequestStats.getPhaseMetric(searchPhaseName), greaterThanOrEqualTo(tookTimeInMillis)); + } + } + + public void testSearchRequestStatsOnPhaseStartConcurrently() throws InterruptedException { + SearchRequestStats testRequestStats = new SearchRequestStats(); + int numTasks = randomIntBetween(5, 50); + Thread[] threads = new Thread[numTasks * SearchPhaseName.values().length]; + Phaser phaser = new Phaser(numTasks * SearchPhaseName.values().length + 1); + CountDownLatch countDownLatch = new CountDownLatch(numTasks * SearchPhaseName.values().length); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + for (int i = 0; i < numTasks; i++) { + threads[i] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + testRequestStats.onPhaseStart(ctx); + countDownLatch.countDown(); + }); + threads[i].start(); + } + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + assertEquals(numTasks, testRequestStats.getPhaseCurrent(searchPhaseName)); + } + } + + public void testSearchRequestStatsOnPhaseEndConcurrently() throws InterruptedException { + SearchRequestStats testRequestStats = new SearchRequestStats(); + int numTasks = 
randomIntBetween(5, 50); + Thread[] threads = new Thread[numTasks * SearchPhaseName.values().length]; + Phaser phaser = new Phaser(numTasks * SearchPhaseName.values().length + 1); + CountDownLatch countDownLatch = new CountDownLatch(numTasks * SearchPhaseName.values().length); + Map searchPhaseNameLongMap = new HashMap<>(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + long tookTimeInMillis = randomIntBetween(1, 10); + long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis); + when(mockSearchPhase.getStartTimeInNanos()).thenReturn(startTime); + for (int i = 0; i < numTasks; i++) { + threads[i] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + testRequestStats.onPhaseEnd(ctx); + countDownLatch.countDown(); + }); + threads[i].start(); + } + searchPhaseNameLongMap.put(searchPhaseName, tookTimeInMillis); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + assertEquals(numTasks, testRequestStats.getPhaseTotal(searchPhaseName)); + assertThat( + testRequestStats.getPhaseMetric(searchPhaseName), + greaterThanOrEqualTo((searchPhaseNameLongMap.get(searchPhaseName) * numTasks)) + ); + } + } + + public void testSearchRequestStatsOnPhaseFailureConcurrently() throws InterruptedException { + SearchRequestStats testRequestStats = new SearchRequestStats(); + int numTasks = randomIntBetween(5, 50); + Thread[] threads = new Thread[numTasks * SearchPhaseName.values().length]; + Phaser phaser = new Phaser(numTasks * SearchPhaseName.values().length + 1); + CountDownLatch countDownLatch = new CountDownLatch(numTasks * SearchPhaseName.values().length); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + for (int i = 0; i < numTasks; i++) { + threads[i] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + testRequestStats.onPhaseStart(ctx); + testRequestStats.onPhaseFailure(ctx); + countDownLatch.countDown(); + }); + threads[i].start(); + } + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); + } + } +} diff --git a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java index f65b0e231b1dd..c27e4bf27327a 100644 --- a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java +++ b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java @@ -32,11 +32,20 @@ package org.opensearch.index.search.stats; +import org.opensearch.action.search.SearchPhase; +import org.opensearch.action.search.SearchPhaseContext; +import org.opensearch.action.search.SearchPhaseName; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.index.search.stats.SearchStats.Stats; import org.opensearch.test.OpenSearchTestCase; import java.util.HashMap; import java.util.Map; +import 
java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class SearchStatsTests extends OpenSearchTestCase { @@ -63,6 +72,37 @@ public void testShardLevelSearchGroupStats() throws Exception { // adding again would then return wrong search stats (would return 4! instead of 3) searchStats1.add(searchStats2); assertStats(groupStats1.get("group1"), 3); + + long paramValue = randomIntBetween(2, 50); + + // Testing for request stats + SearchRequestStats testRequestStats = new SearchRequestStats(); + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + when(mockSearchPhase.getStartTimeInNanos()).thenReturn(System.nanoTime() - TimeUnit.SECONDS.toNanos(paramValue)); + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + for (int iterator = 0; iterator < paramValue; iterator++) { + testRequestStats.onPhaseStart(ctx); + testRequestStats.onPhaseEnd(ctx); + } + } + searchStats1.setSearchRequestStats(testRequestStats); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + assertEquals( + 0, + searchStats1.getTotal().getRequestStatsLongHolder().getRequestStatsHolder().get(searchPhaseName.getName()).current + ); + assertEquals( + paramValue, + searchStats1.getTotal().getRequestStatsLongHolder().getRequestStatsHolder().get(searchPhaseName.getName()).total + ); + assertThat( + searchStats1.getTotal().getRequestStatsLongHolder().getRequestStatsHolder().get(searchPhaseName.getName()).timeInMillis, + greaterThanOrEqualTo(paramValue) + ); + } } private static void assertStats(Stats stats, long equalTo) { @@ -87,5 +127,4 @@ private static void assertStats(Stats stats, long equalTo) { // avg_concurrency is not summed up across stats assertEquals(1, stats.getConcurrentAvgSliceCount(), 0); } - } diff --git a/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java b/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java index 9be45d4e77940..6f36d22b7e17b 100644 --- a/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java +++ b/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java @@ -32,6 +32,8 @@ package org.opensearch.indices; +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.test.OpenSearchTestCase; @@ -43,7 +45,9 @@ public class NodeIndicesStatsTests extends OpenSearchTestCase { public void testInvalidLevel() { - final NodeIndicesStats stats = new NodeIndicesStats(null, Collections.emptyMap()); + CommonStats oldStats = new CommonStats(); + SearchRequestStats requestStats = new SearchRequestStats(); + final NodeIndicesStats stats = new NodeIndicesStats(oldStats, Collections.emptyMap(), requestStats); final String level = randomAlphaOfLength(16); final ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("level", level)); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> stats.toXContent(null, params)); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 3c31c979ce856..2b432906ee128 100644 
--- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -2069,6 +2069,7 @@ public void onFailure(final Exception e) { new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), repositoriesServiceReference::get, fileCacheCleaner, + null, new RemoteStoreStatsTrackerFactory(clusterService, settings) ); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); @@ -2297,7 +2298,8 @@ public void onFailure(final Exception e) { namedWriteableRegistry, List.of(), client - ) + ), + null ) ); actions.put( From 9f0e01743de1bf23264945d54d0d5ab20593020b Mon Sep 17 00:00:00 2001 From: Sorabh Date: Wed, 20 Sep 2023 20:32:00 -0700 Subject: [PATCH 21/23] Fix NPE in ConcurrentQueryProfile while computing the breakdown map for slices (#10111) * Fix NPE in ConcurrentQueryProfile while computing the breakdown map for slices. There can be cases where one or more slices may not have timing-related information for their leaves in the contexts map. When building the slice- and query-level breakdown maps, such cases need to be handled by falling back to the correct default values. Also updates the min/max/avg sliceNodeTime so that it no longer includes the time to create the weight or the time slice threads spend waiting: it now reflects the min/max/avg execution time of each slice, whereas totalNodeTime continues to reflect the total query time. Signed-off-by: Sorabh Hamirwasia * Address review comments Signed-off-by: Sorabh Hamirwasia --------- Signed-off-by: Sorabh Hamirwasia --- .../ConcurrentQueryProfileBreakdown.java | 84 +++++++++++--- .../ConcurrentQueryProfileBreakdownTests.java | 103 ++++++++++++++---- 2 files changed, 147 insertions(+), 40 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java index f37d53737d52b..e567fdd2d436c 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java @@ -10,6 +10,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; +import org.opensearch.OpenSearchException; import org.opensearch.search.profile.AbstractProfileBreakdown; import org.opensearch.search.profile.ContextualProfileBreakdown; @@ -73,14 +74,14 @@ public Map<String, Long> toBreakdownMap() { // If there are no leaf contexts, then return the default concurrent query level breakdown, which will include the // create_weight time/count queryNodeTime = createWeightTime; - maxSliceNodeTime = queryNodeTime; - minSliceNodeTime = queryNodeTime; - avgSliceNodeTime = queryNodeTime; + maxSliceNodeTime = 0L; + minSliceNodeTime = 0L; + avgSliceNodeTime = 0L; return buildDefaultQueryBreakdownMap(createWeightTime); } // first create the slice level breakdowns - final Map<Collector, Map<String, Long>> sliceLevelBreakdowns = buildSliceLevelBreakdown(createWeightStartTime); + final Map<Collector, Map<String, Long>> sliceLevelBreakdowns = buildSliceLevelBreakdown(); return buildQueryBreakdownMap(sliceLevelBreakdowns, createWeightTime, createWeightStartTime); } @@ -124,18 +125,19 @@ private Map<String, Long> buildDefaultQueryBreakdownMap(long createWeightTime) { /** * Computes the slice level breakdownMap. It uses sliceCollectorsToLeaves to figure out all the leaves or segments part of a slice.
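 * As a hypothetical illustration (the key names and values are examples only, not taken from this change), a result for two slices could have the shape: <pre>{@code {sliceCollectorA -> {score=10, score_count=2, score_slice_start_time=t, score_slice_end_time=t+10, ...}, sliceCollectorB -> {score=7, score_count=1, ...}}}</pre>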
* Then use the breakdown timing stats for each of these leaves to calculate the breakdown stats at slice level. - * @param createWeightStartTime start time when createWeight is called + * * @return map of collector (or slice) to breakdown map */ - Map<Collector, Map<String, Long>> buildSliceLevelBreakdown(long createWeightStartTime) { + Map<Collector, Map<String, Long>> buildSliceLevelBreakdown() { final Map<Collector, Map<String, Long>> sliceLevelBreakdowns = new HashMap<>(); - long totalSliceNodeTime = 0; + long totalSliceNodeTime = 0L; for (Map.Entry<Collector, List<LeafReaderContext>> slice : sliceCollectorsToLeaves.entrySet()) { final Collector sliceCollector = slice.getKey(); // initialize each slice level breakdown final Map<String, Long> currentSliceBreakdown = sliceLevelBreakdowns.computeIfAbsent(sliceCollector, k -> new HashMap<>()); // max slice end time across all timing types long sliceMaxEndTime = Long.MIN_VALUE; + long sliceMinStartTime = Long.MAX_VALUE; for (QueryTimingType timingType : QueryTimingType.values()) { if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { // do nothing for create weight as that is query level time and not slice level continue; } @@ -155,6 +157,12 @@ Map<Collector, Map<String, Long>> buildSliceLevelBreakdown(long createWeightStar // leaf, but the leaf level breakdown will not be created in the contexts map. // This is because before updating the contexts map, the query hits earlyTerminationException. // To handle such case, we will ignore the leaf that is not present. + // + // Other than early termination, this can also happen in other cases. For example: consider a must boolean query + // with 2 boolean clauses. While creating the scorer for the first clause, if no docs are found for the field in a + // leaf context, a null scorer is returned. Then no scorer will be created for the second clause's weight for this + // leaf context either (as it is a must query). As a result, the leaf context ends up missing from the contexts map + // for the second clause's weight.
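+ // + // A hypothetical sketch of that second case (illustrative only; the query below is an assumption, not code from this change): + // Query q = new BooleanQuery.Builder() + // .add(new TermQuery(new Term("f1", "a")), BooleanClause.Occur.MUST) + // .add(new TermQuery(new Term("f2", "b")), BooleanClause.Occur.MUST) + // .build(); + // For a segment with no postings for f1, the first clause yields a null Scorer, so the second clause's + // weight never profiles that leaf and it stays absent from the contexts map, hence the skip below.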
continue; } final Map currentSliceLeafBreakdownMap = contexts.get(sliceLeaf).toBreakdownMap(); @@ -182,15 +190,36 @@ Map> buildSliceLevelBreakdown(long createWeightStar ); } // compute sliceMaxEndTime as max of sliceEndTime across all timing types - sliceMaxEndTime = Math.max(sliceMaxEndTime, currentSliceBreakdown.get(timingTypeSliceEndTimeKey)); + sliceMaxEndTime = Math.max(sliceMaxEndTime, currentSliceBreakdown.getOrDefault(timingTypeSliceEndTimeKey, Long.MIN_VALUE)); + sliceMinStartTime = Math.min( + sliceMinStartTime, + currentSliceBreakdown.getOrDefault(timingTypeSliceStartTimeKey, Long.MAX_VALUE) + ); // compute total time for each timing type at slice level using sliceEndTime and sliceStartTime currentSliceBreakdown.put( timingType.toString(), - currentSliceBreakdown.get(timingTypeSliceEndTimeKey) - currentSliceBreakdown.get(timingTypeSliceStartTimeKey) + currentSliceBreakdown.getOrDefault(timingTypeSliceEndTimeKey, 0L) - currentSliceBreakdown.getOrDefault( + timingTypeSliceStartTimeKey, + 0L + ) ); } - // currentSliceNodeTime includes the create weight time as well which will be same for all the slices - long currentSliceNodeTime = sliceMaxEndTime - createWeightStartTime; + // currentSliceNodeTime does not include the create weight time, as that is computed in non-concurrent part + long currentSliceNodeTime; + if (sliceMinStartTime == Long.MAX_VALUE && sliceMaxEndTime == Long.MIN_VALUE) { + currentSliceNodeTime = 0L; + } else if (sliceMinStartTime == Long.MAX_VALUE || sliceMaxEndTime == Long.MIN_VALUE) { + throw new OpenSearchException( + "Unexpected value of sliceMinStartTime [" + + sliceMinStartTime + + "] or sliceMaxEndTime [" + + sliceMaxEndTime + + "] while computing the slice level timing profile breakdowns" + ); + } else { + currentSliceNodeTime = sliceMaxEndTime - sliceMinStartTime; + } + // compute max/min slice times maxSliceNodeTime = Math.max(maxSliceNodeTime, currentSliceNodeTime); minSliceNodeTime = Math.min(minSliceNodeTime, currentSliceNodeTime); @@ -245,8 +274,8 @@ public Map buildQueryBreakdownMap( // for all other timing types, we will compute min/max/avg/total across slices for (Map.Entry> sliceBreakdown : sliceLevelBreakdowns.entrySet()) { - Long sliceBreakdownTypeTime = sliceBreakdown.getValue().get(timingTypeKey); - Long sliceBreakdownTypeCount = sliceBreakdown.getValue().get(timingTypeCountKey); + long sliceBreakdownTypeTime = sliceBreakdown.getValue().getOrDefault(timingTypeKey, 0L); + long sliceBreakdownTypeCount = sliceBreakdown.getValue().getOrDefault(timingTypeCountKey, 0L); // compute max/min/avg TimingType time across slices queryBreakdownMap.compute( maxBreakdownTypeTime, @@ -276,17 +305,38 @@ public Map buildQueryBreakdownMap( ); // query start/end time for a TimingType is min/max of start/end time across slices for that TimingType - queryTimingTypeEndTime = Math.max(queryTimingTypeEndTime, sliceBreakdown.getValue().get(sliceEndTimeForTimingType)); - queryTimingTypeStartTime = Math.min(queryTimingTypeStartTime, sliceBreakdown.getValue().get(sliceStartTimeForTimingType)); + queryTimingTypeEndTime = Math.max( + queryTimingTypeEndTime, + sliceBreakdown.getValue().getOrDefault(sliceEndTimeForTimingType, Long.MIN_VALUE) + ); + queryTimingTypeStartTime = Math.min( + queryTimingTypeStartTime, + sliceBreakdown.getValue().getOrDefault(sliceStartTimeForTimingType, Long.MAX_VALUE) + ); queryTimingTypeCount += sliceBreakdownTypeCount; } + + if (queryTimingTypeStartTime == Long.MAX_VALUE || queryTimingTypeEndTime == Long.MIN_VALUE) { + throw new 
OpenSearchException( + "Unexpected timing type [" + + timingTypeKey + + "] start [" + + queryTimingTypeStartTime + + "] or end time [" + + queryTimingTypeEndTime + + "] computed across slices for profile results" + ); + } queryBreakdownMap.put(timingTypeKey, queryTimingTypeEndTime - queryTimingTypeStartTime); queryBreakdownMap.put(timingTypeCountKey, queryTimingTypeCount); - queryBreakdownMap.compute(avgBreakdownTypeTime, (key, value) -> (value == null) ? 0 : value / sliceLevelBreakdowns.size()); - queryBreakdownMap.compute(avgBreakdownTypeCount, (key, value) -> (value == null) ? 0 : value / sliceLevelBreakdowns.size()); + queryBreakdownMap.compute(avgBreakdownTypeTime, (key, value) -> (value == null) ? 0L : value / sliceLevelBreakdowns.size()); + queryBreakdownMap.compute(avgBreakdownTypeCount, (key, value) -> (value == null) ? 0L : value / sliceLevelBreakdowns.size()); // compute query end time using max of query end time across all timing types queryEndTime = Math.max(queryEndTime, queryTimingTypeEndTime); } + if (queryEndTime == Long.MIN_VALUE) { + throw new OpenSearchException("Unexpected error while computing the query end time across slices in profile result"); + } queryNodeTime = queryEndTime - createWeightStartTime; return queryBreakdownMap; } diff --git a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java index 065c90b262e11..f29ba3b0cea07 100644 --- a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java +++ b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java @@ -81,9 +81,9 @@ public void testBreakdownMapWithNoLeafContext() throws Exception { ); // verify total/min/max/avg node time is same as weight time assertEquals(createWeightTime, testQueryProfileBreakdown.toNodeTime()); - assertEquals(createWeightTime, testQueryProfileBreakdown.getMaxSliceNodeTime()); - assertEquals(createWeightTime, testQueryProfileBreakdown.getMinSliceNodeTime()); - assertEquals(createWeightTime, testQueryProfileBreakdown.getAvgSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getAvgSliceNodeTime()); continue; } assertEquals(0, (long) queryBreakDownMap.get(timingTypeKey)); @@ -103,16 +103,15 @@ public void testBuildSliceLevelBreakdownWithSingleSlice() throws Exception { final LeafReaderContext sliceLeaf = directoryReader.leaves().get(0); final Collector sliceCollector = mock(Collector.class); final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); - final Map leafProfileBreakdownMap = getLeafBreakdownMap(createWeightEarliestStartTime + 10, 10, 1); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); final AbstractProfileBreakdown leafProfileBreakdown = new TestQueryProfileBreakdown( QueryTimingType.class, leafProfileBreakdownMap ); testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector, sliceLeaf); testQueryProfileBreakdown.getContexts().put(sliceLeaf, leafProfileBreakdown); - final Map> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown( - createWeightEarliestStartTime - ); + final Map> sliceBreakdownMap = 
testQueryProfileBreakdown.buildSliceLevelBreakdown(); assertFalse(sliceBreakdownMap == null || sliceBreakdownMap.isEmpty()); assertEquals(1, sliceBreakdownMap.size()); assertTrue(sliceBreakdownMap.containsKey(sliceCollector)); @@ -141,9 +140,9 @@ public void testBuildSliceLevelBreakdownWithSingleSlice() throws Exception { (long) sliceBreakdown.get(timingTypeKey + SLICE_END_TIME_SUFFIX) ); } - assertEquals(20, testQueryProfileBreakdown.getMaxSliceNodeTime()); - assertEquals(20, testQueryProfileBreakdown.getMinSliceNodeTime()); - assertEquals(20, testQueryProfileBreakdown.getAvgSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getAvgSliceNodeTime()); directoryReader.close(); directory.close(); } @@ -154,8 +153,9 @@ public void testBuildSliceLevelBreakdownWithMultipleSlices() throws Exception { final Collector sliceCollector_1 = mock(Collector.class); final Collector sliceCollector_2 = mock(Collector.class); final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); - final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEarliestStartTime + 10, 10, 1); - final Map leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEarliestStartTime + 40, 10, 1); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final Map leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEndTime + 40, 10, 1); final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( QueryTimingType.class, leafProfileBreakdownMap_1 @@ -168,9 +168,7 @@ public void testBuildSliceLevelBreakdownWithMultipleSlices() throws Exception { testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1)); testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(1), leafProfileBreakdown_2); - final Map> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown( - createWeightEarliestStartTime - ); + final Map> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown(); assertFalse(sliceBreakdownMap == null || sliceBreakdownMap.isEmpty()); assertEquals(2, sliceBreakdownMap.size()); @@ -208,9 +206,9 @@ public void testBuildSliceLevelBreakdownWithMultipleSlices() throws Exception { } } - assertEquals(50, testQueryProfileBreakdown.getMaxSliceNodeTime()); - assertEquals(20, testQueryProfileBreakdown.getMinSliceNodeTime()); - assertEquals(35, testQueryProfileBreakdown.getAvgSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getAvgSliceNodeTime()); directoryReader.close(); directory.close(); } @@ -221,8 +219,9 @@ public void testBreakDownMapWithMultipleSlices() throws Exception { final Collector sliceCollector_1 = mock(Collector.class); final Collector sliceCollector_2 = mock(Collector.class); final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); - final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEarliestStartTime + 10, 10, 1); - final Map leafProfileBreakdownMap_2 = 
getLeafBreakdownMap(createWeightEarliestStartTime + 40, 20, 1); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final Map leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEndTime + 40, 20, 1); final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( QueryTimingType.class, leafProfileBreakdownMap_1 @@ -269,9 +268,67 @@ public void testBreakDownMapWithMultipleSlices() throws Exception { assertEquals(1, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey)); } - assertEquals(60, testQueryProfileBreakdown.getMaxSliceNodeTime()); - assertEquals(20, testQueryProfileBreakdown.getMinSliceNodeTime()); - assertEquals(40, testQueryProfileBreakdown.getAvgSliceNodeTime()); + assertEquals(20, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(15, testQueryProfileBreakdown.getAvgSliceNodeTime()); + directoryReader.close(); + directory.close(); + } + + public void testBreakDownMapWithMultipleSlicesAndOneSliceWithNoLeafContext() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(2); + final Directory directory = directoryReader.directory(); + final Collector sliceCollector_1 = mock(Collector.class); + final Collector sliceCollector_2 = mock(Collector.class); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_1 + ); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_1, directoryReader.leaves().get(0)); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1)); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); + // leaf2 profile breakdown is not present in contexts map + + Map queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap(); + assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty()); + assertEquals(66, queryBreakDownMap.size()); + + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + String timingTypeKey = queryTimingType.toString(); + String timingTypeCountKey = queryTimingType + TIMING_TYPE_COUNT_SUFFIX; + + if (queryTimingType.equals(QueryTimingType.CREATE_WEIGHT)) { + final long createWeightTime = queryBreakDownMap.get(timingTypeKey); + assertEquals(createWeightTimer.getApproximateTiming(), createWeightTime); + assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for weight type stats + assertFalse( + queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || 
queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + continue; + } + assertEquals(10, (long) queryBreakDownMap.get(timingTypeKey)); + assertEquals(10, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey)); + assertEquals(5, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey)); + assertEquals(0, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeKey)); + assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey)); + assertEquals(1, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey)); + // min of 0 means one of the slices didn't work on any leaf context + assertEquals(0, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeCountKey)); + assertEquals(0, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey)); + } + + assertEquals(10, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(5, testQueryProfileBreakdown.getAvgSliceNodeTime()); directoryReader.close(); directory.close(); } From ef343d769439f510aba6f20b7746bbcf5f42e377 Mon Sep 17 00:00:00 2001 From: Poojita Raj Date: Thu, 21 Sep 2023 06:20:23 -0700 Subject: [PATCH 22/23] Bump version of jgit to 6.7.0.202309050840-r to fix CVE-2023-4759 (#10147) * change dependency version of jgit Signed-off-by: Poojita Raj * add changelog Signed-off-by: Poojita Raj --------- Signed-off-by: Poojita Raj --- CHANGELOG.md | 3 ++- buildSrc/build.gradle | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 10205ae20c001..18b675c264718 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.bouncycastle:bcpkix-jdk15on` to `org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) - Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) +- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) ### Changed - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) @@ -110,4 +111,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x \ No newline at end of file +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 3dc689795151b..8fbd7cf7770fb 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -133,6 +133,10 @@ dependencies { integTestImplementation('org.spockframework:spock-core:2.3-groovy-3.0') { exclude module: "groovy" } + implementation('org.ajoberstar.grgit:grgit-core:5.2.0') { + exclude group: 'org.eclipse.jgit', module: 'org.eclipse.jgit' + } + implementation 'org.eclipse.jgit:org.eclipse.jgit:6.7.0.202309050840-r' } configurations.all { From 7dca3ca82576cd1e16ba23aabf3d781aa35d210b Mon Sep 17
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Sep 2023 09:51:05 -0400 Subject: [PATCH 23/23] Bump com.github.tomakehurst:wiremock-jre8-standalone from 2.35.0 to 3.0.1 in /buildSrc (#9752) * Bump com.github.tomakehurst:wiremock-jre8-standalone in /buildSrc Bumps [com.github.tomakehurst:wiremock-jre8-standalone](https://github.com/wiremock/wiremock) from 2.35.0 to 3.0.1. - [Release notes](https://github.com/wiremock/wiremock/releases) - [Commits](https://github.com/wiremock/wiremock/compare/2.35.0...3.0.1) --- updated-dependencies: - dependency-name: com.github.tomakehurst:wiremock-jre8-standalone dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] * Bump org.wiremock:wiremock-standalone in /buildSrc Signed-off-by: Andriy Redko --------- Signed-off-by: dependabot[bot] Signed-off-by: Andriy Redko Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 18b675c264718..8bdc8b665217d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -93,6 +93,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) - Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098)) - Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125)) +- Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752)) ### Changed - Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 8fbd7cf7770fb..d7bdd09ea882e 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -128,7 +128,7 @@ dependencies { testFixturesApi "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}" testFixturesApi gradleApi() testFixturesApi gradleTestKit() - testImplementation 'com.github.tomakehurst:wiremock-jre8-standalone:2.35.0' + testImplementation 'org.wiremock:wiremock-standalone:3.1.0' testImplementation "org.mockito:mockito-core:${props.getProperty('mockito')}" integTestImplementation('org.spockframework:spock-core:2.3-groovy-3.0') { exclude module: "groovy"
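As a quick sanity check for the artifact swap, a minimal smoke test can be compiled against the new coordinates. This is only a sketch under the assumption that the 3.x standalone jar still ships the com.github.tomakehurst.wiremock packages (the class and method names below are the standard WireMock stubbing API, not code taken from this change):

import com.github.tomakehurst.wiremock.WireMockServer;

import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
import static com.github.tomakehurst.wiremock.client.WireMock.get;
import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo;
import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.options;

public class WireMockUpgradeSmokeTest {
    public static void main(String[] args) {
        // Start a server on a random free port, exactly as tests written against 2.35.0 did.
        WireMockServer server = new WireMockServer(options().dynamicPort());
        server.start();
        // Stub a simple GET endpoint; the stubbing DSL is unchanged by the artifact move.
        server.stubFor(get(urlEqualTo("/ping")).willReturn(aResponse().withStatus(200).withBody("pong")));
        System.out.println("Stub is live at http://localhost:" + server.port() + "/ping");
        server.stop();
    }
}

If this compiles and runs against org.wiremock:wiremock-standalone:3.1.0, the coordinates change is likely source-compatible for call sites like these.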